xref: /linux/drivers/infiniband/hw/irdma/ctrl.c (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include <linux/etherdevice.h>
4 
5 #include "osdep.h"
6 #include "hmc.h"
7 #include "defs.h"
8 #include "type.h"
9 #include "ws.h"
10 #include "protos.h"
11 
12 /**
13  * irdma_get_qp_from_list - get next qp from a list
14  * @head: list head of QPs
15  * @qp: current qp
16  */
17 struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
18 					   struct irdma_sc_qp *qp)
19 {
20 	struct list_head *lastentry;
21 	struct list_head *entry = NULL;
22 
23 	if (list_empty(head))
24 		return NULL;
25 
26 	if (!qp) {
27 		entry = head->next;
28 	} else {
29 		lastentry = &qp->list;
30 		entry = lastentry->next;
31 		if (entry == head)
32 			return NULL;
33 	}
34 
35 	return container_of(entry, struct irdma_sc_qp, list);
36 }
37 
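/*
 * Usage sketch (illustrative only, not driver code): walking every QP on
 * a qos qplist with irdma_get_qp_from_list().  Passing NULL for @qp
 * starts at the head; the walk ends when the list wraps back around to
 * the head and the function returns NULL.  The caller is assumed to hold
 * the corresponding qos_mutex, as irdma_sc_suspend_resume_qps() does.
 */
#if 0
static void example_walk_qplist(struct list_head *qplist)
{
	struct irdma_sc_qp *qp = NULL;

	while ((qp = irdma_get_qp_from_list(qplist, qp)))
		pr_debug("visiting qp_id %d\n", qp->qp_uk.qp_id);
}
#endif
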
38 /**
39  * irdma_sc_suspend_resume_qps - suspend/resume all QPs on a VSI
40  * @vsi: the VSI struct pointer
41  * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
42  */
43 void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
44 {
45 	struct irdma_sc_qp *qp = NULL;
46 	u8 i;
47 
48 	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
49 		mutex_lock(&vsi->qos[i].qos_mutex);
50 		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
51 		while (qp) {
52 			if (op == IRDMA_OP_RESUME) {
53 				if (!qp->dev->ws_add(vsi, i)) {
54 					qp->qs_handle =
55 						vsi->qos[qp->user_pri].qs_handle;
56 					irdma_cqp_qp_suspend_resume(qp, op);
57 				} else {
58 					irdma_cqp_qp_suspend_resume(qp, op);
59 					irdma_modify_qp_to_err(qp);
60 				}
61 			} else if (op == IRDMA_OP_SUSPEND) {
62 				/* issue cqp suspend command */
63 				if (!irdma_cqp_qp_suspend_resume(qp, op))
64 					atomic_inc(&vsi->qp_suspend_reqs);
65 			}
66 			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
67 		}
68 		mutex_unlock(&vsi->qos[i].qos_mutex);
69 	}
70 }
71 
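/**
 * irdma_set_qos_info - set per-priority qos info on the VSI from l2 params
 * @vsi: RDMA VSI pointer
 * @l2p: l2 parameters from the LAN driver
 */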
72 static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
73 			       struct irdma_l2params *l2p)
74 {
75 	u8 i;
76 
77 	vsi->qos_rel_bw = l2p->vsi_rel_bw;
78 	vsi->qos_prio_type = l2p->vsi_prio_type;
79 	vsi->dscp_mode = l2p->dscp_mode;
80 	if (l2p->dscp_mode) {
81 		memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
82 		for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
83 			l2p->up2tc[i] = i;
84 	}
85 	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
86 		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
87 			vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
88 		vsi->qos[i].traffic_class = l2p->up2tc[i];
89 		vsi->qos[i].rel_bw =
90 			l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
91 		vsi->qos[i].prio_type =
92 			l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
93 		vsi->qos[i].valid = false;
94 	}
95 }
96 
97 /**
98  * irdma_change_l2params - given the new l2 parameters, change all QPs
99  * @vsi: RDMA VSI pointer
100  * @l2params: New parameters from l2
101  */
102 void irdma_change_l2params(struct irdma_sc_vsi *vsi,
103 			   struct irdma_l2params *l2params)
104 {
105 	if (l2params->mtu_changed) {
106 		vsi->mtu = l2params->mtu;
107 		if (vsi->ieq)
108 			irdma_reinitialize_ieq(vsi);
109 	}
110 
111 	if (!l2params->tc_changed)
112 		return;
113 
114 	vsi->tc_change_pending = false;
115 	irdma_set_qos_info(vsi, l2params);
116 	irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
117 }
118 
119 /**
120  * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
121  * @qp: qp to be removed from qos
122  */
123 void irdma_qp_rem_qos(struct irdma_sc_qp *qp)
124 {
125 	struct irdma_sc_vsi *vsi = qp->vsi;
126 
127 	ibdev_dbg(to_ibdev(qp->dev),
128 		  "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
129 		  qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
130 		  qp->on_qoslist);
131 	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
132 	if (qp->on_qoslist) {
133 		qp->on_qoslist = false;
134 		list_del(&qp->list);
135 	}
136 	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
137 }
138 
139 /**
140  * irdma_qp_add_qos - called during setctx for qp to be added to qos
141  * @qp: qp to be added to qos
142  */
143 void irdma_qp_add_qos(struct irdma_sc_qp *qp)
144 {
145 	struct irdma_sc_vsi *vsi = qp->vsi;
146 
147 	ibdev_dbg(to_ibdev(qp->dev),
148 		  "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
149 		  qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
150 		  qp->on_qoslist);
151 	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
152 	if (!qp->on_qoslist) {
153 		list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
154 		qp->on_qoslist = true;
155 		qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
156 	}
157 	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
158 }
159 
160 /**
161  * irdma_sc_pd_init - initialize sc pd struct
162  * @dev: sc device struct
163  * @pd: sc pd ptr
164  * @pd_id: pd_id for allocated pd
165  * @abi_ver: User/Kernel ABI version
166  */
167 void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
168 		      int abi_ver)
169 {
170 	pd->pd_id = pd_id;
171 	pd->abi_ver = abi_ver;
172 	pd->dev = dev;
173 }
174 
175 /**
176  * irdma_sc_add_arp_cache_entry - cqp wqe to add an arp cache entry
177  * @cqp: struct for cqp hw
178  * @info: arp entry information
179  * @scratch: u64 saved to be used during cqp completion
180  * @post_sq: flag for cqp db to ring
181  */
182 static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
183 					struct irdma_add_arp_cache_entry_info *info,
184 					u64 scratch, bool post_sq)
185 {
186 	__le64 *wqe;
187 	u64 hdr;
188 
189 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
190 	if (!wqe)
191 		return -ENOMEM;
192 	set_64bit_val(wqe, 8, info->reach_max);
193 	set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
194 
195 	hdr = info->arp_index |
196 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
197 	      FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) |
198 	      FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) |
199 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
200 	dma_wmb(); /* make sure WQE is written before valid bit is set */
201 
202 	set_64bit_val(wqe, 24, hdr);
203 
204 	print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET,
205 			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
206 	if (post_sq)
207 		irdma_sc_cqp_post_sq(cqp);
208 
209 	return 0;
210 }
211 
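/*
 * Posting-pattern sketch (illustrative only, not driver code): every CQP
 * command in this file follows the same sequence.  The quadword at offset
 * 24 carries the opcode and the WQE-valid (polarity) bit, so it is written
 * last, after a dma_wmb(), to keep hardware from treating a half-written
 * WQE as valid.  The function name, 'op' and 'payload' are hypothetical.
 */
#if 0
static int example_post_cqp_cmd(struct irdma_sc_cqp *cqp, u64 scratch,
				u64 op, u64 payload, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;
	set_64bit_val(wqe, 8, payload);	/* command-specific fields */
	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, op) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */
	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}
#endif
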
212 /**
213  * irdma_sc_del_arp_cache_entry - delete arp cache entry
214  * @cqp: struct for cqp hw
215  * @scratch: u64 saved to be used during cqp completion
216  * @arp_index: arp index to delete arp entry
217  * @post_sq: flag for cqp db to ring
218  */
219 static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
220 					u16 arp_index, bool post_sq)
221 {
222 	__le64 *wqe;
223 	u64 hdr;
224 
225 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
226 	if (!wqe)
227 		return -ENOMEM;
228 
229 	hdr = arp_index |
230 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
231 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
232 	dma_wmb(); /* make sure WQE is written before valid bit is set */
233 
234 	set_64bit_val(wqe, 24, hdr);
235 
236 	print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE",
237 			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
238 			     IRDMA_CQP_WQE_SIZE * 8, false);
239 	if (post_sq)
240 		irdma_sc_cqp_post_sq(cqp);
241 
242 	return 0;
243 }
244 
245 /**
246  * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
247  * @cqp: struct for cqp hw
248  * @info: info for apbvt entry to add or delete
249  * @scratch: u64 saved to be used during cqp completion
250  * @post_sq: flag for cqp db to ring
251  */
252 static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
253 				       struct irdma_apbvt_info *info,
254 				       u64 scratch, bool post_sq)
255 {
256 	__le64 *wqe;
257 	u64 hdr;
258 
259 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
260 	if (!wqe)
261 		return -ENOMEM;
262 
263 	set_64bit_val(wqe, 16, info->port);
264 
265 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
266 	      FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
267 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
268 	dma_wmb(); /* make sure WQE is written before valid bit is set */
269 
270 	set_64bit_val(wqe, 24, hdr);
271 
272 	print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16,
273 			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
274 	if (post_sq)
275 		irdma_sc_cqp_post_sq(cqp);
276 
277 	return 0;
278 }
279 
280 /**
281  * irdma_sc_manage_qhash_table_entry - manage quad hash entries
282  * @cqp: struct for cqp hw
283  * @info: info for quad hash to manage
284  * @scratch: u64 saved to be used during cqp completion
285  * @post_sq: flag for cqp db to ring
286  *
287  * This is called before connection establishment is started.
288  * For passive connections, when a listener is created, this is
289  * called with an entry type of IRDMA_QHASH_TYPE_TCP_SYN with the local
290  * ip address and tcp port. When a SYN is received (passive
291  * connections) or sent (active connections), this routine is
292  * called with an entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
293  * and the quad is passed in info.
294  *
295  * When the iwarp connection is done and its state moves to RTS, the
296  * quad hash entry in the hardware will point to the iwarp QP
297  * number and requires no further calls from the driver.
298  */
299 static int
300 irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
301 				  struct irdma_qhash_table_info *info,
302 				  u64 scratch, bool post_sq)
303 {
304 	__le64 *wqe;
305 	u64 qw1 = 0;
306 	u64 qw2 = 0;
307 	u64 temp;
308 	struct irdma_sc_vsi *vsi = info->vsi;
309 
310 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
311 	if (!wqe)
312 		return -ENOMEM;
313 
314 	set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
315 
316 	qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
317 	      FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
318 	if (info->ipv4_valid) {
319 		set_64bit_val(wqe, 48,
320 			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
321 	} else {
322 		set_64bit_val(wqe, 56,
323 			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
324 			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
325 
326 		set_64bit_val(wqe, 48,
327 			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
328 			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
329 	}
330 	qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
331 			 vsi->qos[info->user_pri].qs_handle);
332 	if (info->vlan_valid)
333 		qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
334 	set_64bit_val(wqe, 16, qw2);
335 	if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
336 		qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
337 		if (!info->ipv4_valid) {
338 			set_64bit_val(wqe, 40,
339 				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
340 				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
341 			set_64bit_val(wqe, 32,
342 				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
343 				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
344 		} else {
345 			set_64bit_val(wqe, 32,
346 				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
347 		}
348 	}
349 
350 	set_64bit_val(wqe, 8, qw1);
351 	temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
352 	       FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
353 			  IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
354 	       FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
355 	       FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
356 	       FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
357 	       FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
358 	dma_wmb(); /* make sure WQE is written before valid bit is set */
359 
360 	set_64bit_val(wqe, 24, temp);
361 
362 	print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16,
363 			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
364 	if (post_sq)
365 		irdma_sc_cqp_post_sq(cqp);
366 
367 	return 0;
368 }
369 
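/*
 * Request sketch (illustrative only, not driver code): a minimal qhash
 * add for a passive-side listener.  Per the comment above, the local
 * address and port go in the dest_ip/dest_port fields of a TCP_SYN
 * entry.  The function name is hypothetical, and the use of
 * IRDMA_QHASH_MANAGE_TYPE_ADD assumes the manage-type enum from the
 * driver headers.
 */
#if 0
static int example_add_syn_qhash(struct irdma_sc_cqp *cqp,
				 struct irdma_sc_vsi *vsi, const u8 *mac,
				 u32 local_ipv4_addr, u16 listen_port,
				 u64 scratch)
{
	struct irdma_qhash_table_info info = {};

	info.vsi = vsi;
	info.manage = IRDMA_QHASH_MANAGE_TYPE_ADD;
	info.entry_type = IRDMA_QHASH_TYPE_TCP_SYN;
	info.ipv4_valid = true;
	info.dest_ip[0] = local_ipv4_addr; /* local addr goes in dest_ip */
	info.dest_port = listen_port;
	ether_addr_copy(info.mac_addr, mac);
	return irdma_sc_manage_qhash_table_entry(cqp, &info, scratch, true);
}
#endif
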
370 /**
371  * irdma_sc_qp_init - initialize qp
372  * @qp: sc qp
373  * @info: initialization qp info
374  */
375 int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
376 {
377 	int ret_code;
378 	u32 pble_obj_cnt;
379 	u16 wqe_size;
380 
381 	if (info->qp_uk_init_info.max_sq_frag_cnt >
382 	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
383 	    info->qp_uk_init_info.max_rq_frag_cnt >
384 	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
385 		return -EINVAL;
386 
387 	qp->dev = info->pd->dev;
388 	qp->vsi = info->vsi;
389 	qp->ieq_qp = info->vsi->exception_lan_q;
390 	qp->sq_pa = info->sq_pa;
391 	qp->rq_pa = info->rq_pa;
392 	qp->hw_host_ctx_pa = info->host_ctx_pa;
393 	qp->q2_pa = info->q2_pa;
394 	qp->shadow_area_pa = info->shadow_area_pa;
395 	qp->q2_buf = info->q2;
396 	qp->pd = info->pd;
397 	qp->hw_host_ctx = info->host_ctx;
398 	info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
399 	ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
400 	if (ret_code)
401 		return ret_code;
402 
403 	qp->virtual_map = info->virtual_map;
404 	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
405 
406 	if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
407 	    (info->virtual_map && info->rq_pa >= pble_obj_cnt))
408 		return -EINVAL;
409 
410 	qp->llp_stream_handle = (void *)(-1);
411 	qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
412 						    IRDMA_QUEUE_TYPE_SQ_RQ);
413 	ibdev_dbg(to_ibdev(qp->dev),
414 		  "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n",
415 		  qp->hw_sq_size, qp->qp_uk.sq_ring.size);
416 	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4)
417 		wqe_size = IRDMA_WQE_SIZE_128;
418 	else
419 		ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
420 						       &wqe_size);
421 	if (ret_code)
422 		return ret_code;
423 
424 	qp->hw_rq_size = irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
425 				(wqe_size / IRDMA_QP_WQE_MIN_SIZE), IRDMA_QUEUE_TYPE_SQ_RQ);
426 	ibdev_dbg(to_ibdev(qp->dev),
427 		  "WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
428 		  qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
429 	qp->sq_tph_val = info->sq_tph_val;
430 	qp->rq_tph_val = info->rq_tph_val;
431 	qp->sq_tph_en = info->sq_tph_en;
432 	qp->rq_tph_en = info->rq_tph_en;
433 	qp->rcv_tph_en = info->rcv_tph_en;
434 	qp->xmit_tph_en = info->xmit_tph_en;
435 	qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
436 	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
437 
438 	return 0;
439 }
440 
441 /**
442  * irdma_sc_qp_create - create qp
443  * @qp: sc qp
444  * @info: qp create info
445  * @scratch: u64 saved to be used during cqp completion
446  * @post_sq: flag for cqp db to ring
447  */
448 int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
449 		       u64 scratch, bool post_sq)
450 {
451 	struct irdma_sc_cqp *cqp;
452 	__le64 *wqe;
453 	u64 hdr;
454 
455 	cqp = qp->dev->cqp;
456 	if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
457 	    qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
458 		return -EINVAL;
459 
460 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
461 	if (!wqe)
462 		return -ENOMEM;
463 
464 	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
465 	set_64bit_val(wqe, 40, qp->shadow_area_pa);
466 
467 	hdr = qp->qp_uk.qp_id |
468 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
469 	      FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 1 : 0)) |
470 	      FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
471 	      FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
472 	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
473 	      FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
474 	      FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
475 	      FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
476 	      FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
477 			 info->arp_cache_idx_valid) |
478 	      FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
479 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
480 	dma_wmb(); /* make sure WQE is written before valid bit is set */
481 
482 	set_64bit_val(wqe, 24, hdr);
483 
484 	print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
485 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
486 	if (post_sq)
487 		irdma_sc_cqp_post_sq(cqp);
488 
489 	return 0;
490 }
491 
492 /**
493  * irdma_sc_qp_modify - modify qp cqp wqe
494  * @qp: sc qp
495  * @info: modify qp info
496  * @scratch: u64 saved to be used during cqp completion
497  * @post_sq: flag for cqp db to ring
498  */
499 int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
500 		       u64 scratch, bool post_sq)
501 {
502 	__le64 *wqe;
503 	struct irdma_sc_cqp *cqp;
504 	u64 hdr;
505 	u8 term_actions = 0;
506 	u8 term_len = 0;
507 
508 	cqp = qp->dev->cqp;
509 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
510 	if (!wqe)
511 		return -ENOMEM;
512 
513 	if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
514 		if (info->dont_send_fin)
515 			term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
516 		if (info->dont_send_term)
517 			term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
518 		if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
519 		    term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
520 			term_len = info->termlen;
521 	}
522 
523 	set_64bit_val(wqe, 8,
524 		      FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
525 		      FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
526 	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
527 	set_64bit_val(wqe, 40, qp->shadow_area_pa);
528 
529 	hdr = qp->qp_uk.qp_id |
530 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
531 	      FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
532 	      FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
533 	      FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
534 			 info->cached_var_valid) |
535 	      FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
536 	      FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
537 	      FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
538 	      FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
539 	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
540 	      FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
541 	      FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
542 			 info->remove_hash_idx) |
543 	      FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
544 	      FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
545 	      FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
546 			 info->arp_cache_idx_valid) |
547 	      FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
548 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
549 	dma_wmb(); /* make sure WQE is written before valid bit is set */
550 
551 	set_64bit_val(wqe, 24, hdr);
552 
553 	print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
554 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
555 	if (post_sq)
556 		irdma_sc_cqp_post_sq(cqp);
557 
558 	return 0;
559 }
560 
561 /**
562  * irdma_sc_qp_destroy - cqp destroy qp
563  * @qp: sc qp
564  * @scratch: u64 saved to be used during cqp completion
565  * @remove_hash_idx: flag to remove hash idx
566  * @ignore_mw_bnd: memory window bind flag
567  * @post_sq: flag for cqp db to ring
568  */
569 int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
570 			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
571 {
572 	__le64 *wqe;
573 	struct irdma_sc_cqp *cqp;
574 	u64 hdr;
575 
576 	cqp = qp->dev->cqp;
577 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
578 	if (!wqe)
579 		return -ENOMEM;
580 
581 	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
582 	set_64bit_val(wqe, 40, qp->shadow_area_pa);
583 
584 	hdr = qp->qp_uk.qp_id |
585 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
586 	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
587 	      FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
588 	      FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
589 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
590 	dma_wmb(); /* make sure WQE is written before valid bit is set */
591 
592 	set_64bit_val(wqe, 24, hdr);
593 
594 	print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
595 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
596 	if (post_sq)
597 		irdma_sc_cqp_post_sq(cqp);
598 
599 	return 0;
600 }
601 
602 /**
603  * irdma_sc_get_encoded_ird_size - get encoded IRD size
604  * @ird_size: IRD size
605  * The ird from the connection is rounded to a supported HW setting and then encoded
606  * for the ird_size field of qp_ctx. Consumers are expected to provide a valid ird
607  * size based on hardware attributes. IRD size defaults to 4 in case of invalid input.
608  */
609 static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
610 {
611 	switch (ird_size ?
612 		roundup_pow_of_two(2 * ird_size) : 4) {
613 	case 256:
614 		return IRDMA_IRD_HW_SIZE_256;
615 	case 128:
616 		return IRDMA_IRD_HW_SIZE_128;
617 	case 64:
618 	case 32:
619 		return IRDMA_IRD_HW_SIZE_64;
620 	case 16:
621 	case 8:
622 		return IRDMA_IRD_HW_SIZE_16;
623 	case 4:
624 	default:
625 		break;
626 	}
627 
628 	return IRDMA_IRD_HW_SIZE_4;
629 }
630 
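/*
 * Worked example: a requested ird_size of 10 is doubled to 20 and rounded
 * up to the next power of two, 32, which the switch above encodes as
 * IRDMA_IRD_HW_SIZE_64.  An ird_size of 0 selects 4 and takes the default
 * encoding, IRDMA_IRD_HW_SIZE_4.
 */
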
631 /**
632  * irdma_sc_qp_setctx_roce - set qp's context
633  * @qp: sc qp
634  * @qp_ctx: context ptr
635  * @info: ctx info
636  */
637 void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
638 			     struct irdma_qp_host_ctx_info *info)
639 {
640 	struct irdma_roce_offload_info *roce_info;
641 	struct irdma_udp_offload_info *udp;
642 	u8 push_mode_en;
643 	u32 push_idx;
644 
645 	roce_info = info->roce_info;
646 	udp = info->udp_info;
647 	qp->user_pri = info->user_pri;
648 	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
649 		push_mode_en = 0;
650 		push_idx = 0;
651 	} else {
652 		push_mode_en = 1;
653 		push_idx = qp->push_idx;
654 	}
655 	set_64bit_val(qp_ctx, 0,
656 		      FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
657 		      FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
658 		      FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
659 		      FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
660 		      FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
661 		      FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
662 		      FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
663 		      FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
664 		      FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
665 		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
666 		      FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
667 		      FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
668 		      FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
669 		      FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
670 	set_64bit_val(qp_ctx, 8, qp->sq_pa);
671 	set_64bit_val(qp_ctx, 16, qp->rq_pa);
672 	if ((roce_info->dcqcn_en || roce_info->dctcp_en) &&
673 	    !(udp->tos & 0x03))
674 		udp->tos |= ECN_CODE_PT_VAL;
675 	set_64bit_val(qp_ctx, 24,
676 		      FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
677 		      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
678 		      FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
679 		      FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
680 		      FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
681 	set_64bit_val(qp_ctx, 32,
682 		      FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
683 		      FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
684 	set_64bit_val(qp_ctx, 40,
685 		      FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
686 		      FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
687 	set_64bit_val(qp_ctx, 48,
688 		      FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
689 		      FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
690 		      FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
691 	set_64bit_val(qp_ctx, 56,
692 		      FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
693 		      FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
694 		      FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
695 		      FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
696 	set_64bit_val(qp_ctx, 64,
697 		      FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
698 		      FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
699 	set_64bit_val(qp_ctx, 80,
700 		      FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
701 		      FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
702 	set_64bit_val(qp_ctx, 88,
703 		      FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
704 	set_64bit_val(qp_ctx, 96,
705 		      FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
706 		      FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
707 	set_64bit_val(qp_ctx, 112,
708 		      FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
709 	set_64bit_val(qp_ctx, 128,
710 		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
711 		      FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
712 		      FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
713 		      FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
714 	set_64bit_val(qp_ctx, 136,
715 		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
716 		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
717 	set_64bit_val(qp_ctx, 144,
718 		      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
719 	set_64bit_val(qp_ctx, 152, ether_addr_to_u64(roce_info->mac_addr) << 16);
720 	set_64bit_val(qp_ctx, 160,
721 		      FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
722 		      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
723 		      FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
724 		      FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
725 		      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
726 		      FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
727 		      FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
728 		      FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
729 		      FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
730 		      FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
731 		      FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
732 		      FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
733 		      FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
734 	set_64bit_val(qp_ctx, 168,
735 		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
736 	set_64bit_val(qp_ctx, 176,
737 		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
738 		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
739 		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
740 	set_64bit_val(qp_ctx, 184,
741 		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
742 		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
743 	set_64bit_val(qp_ctx, 192,
744 		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
745 		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
746 	set_64bit_val(qp_ctx, 200,
747 		      FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
748 		      FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
749 	set_64bit_val(qp_ctx, 208,
750 		      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
751 
752 	print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
753 			     8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
754 }
755 
756 /**
 * irdma_sc_alloc_local_mac_entry - allocate a mac entry
757  * @cqp: struct for cqp hw
758  * @scratch: u64 saved to be used during cqp completion
759  * @post_sq: flag for cqp db to ring
760  */
761 static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
762 					  bool post_sq)
763 {
764 	__le64 *wqe;
765 	u64 hdr;
766 
767 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
768 	if (!wqe)
769 		return -ENOMEM;
770 
771 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
772 			 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
773 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
774 
775 	dma_wmb(); /* make sure WQE is written before valid bit is set */
776 
777 	set_64bit_val(wqe, 24, hdr);
778 
779 	print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE",
780 			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
781 			     IRDMA_CQP_WQE_SIZE * 8, false);
782 
783 	if (post_sq)
784 		irdma_sc_cqp_post_sq(cqp);
785 	return 0;
786 }
787 
788 /**
789  * irdma_sc_add_local_mac_entry - add mac entry
790  * @cqp: struct for cqp hw
791  * @info: mac addr info
792  * @scratch: u64 saved to be used during cqp completion
793  * @post_sq: flag for cqp db to ring
794  */
795 static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
796 					struct irdma_local_mac_entry_info *info,
797 					u64 scratch, bool post_sq)
798 {
799 	__le64 *wqe;
800 	u64 header;
801 
802 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
803 	if (!wqe)
804 		return -ENOMEM;
805 
806 	set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
807 
808 	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
809 		 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
810 			    IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
811 		 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
812 
813 	dma_wmb(); /* make sure WQE is written before valid bit is set */
814 
815 	set_64bit_val(wqe, 24, header);
816 
817 	print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16,
818 			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
819 
820 	if (post_sq)
821 		irdma_sc_cqp_post_sq(cqp);
822 	return 0;
823 }
824 
825 /**
826  * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac entry
827  * @cqp: struct for cqp hw
828  * @scratch: u64 saved to be used during cqp completion
829  * @entry_idx: index of mac entry
830  * @ignore_ref_count: to force mac address delete
831  * @post_sq: flag for cqp db to ring
832  */
833 static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
834 					u16 entry_idx, u8 ignore_ref_count,
835 					bool post_sq)
836 {
837 	__le64 *wqe;
838 	u64 header;
839 
840 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
841 	if (!wqe)
842 		return -ENOMEM;
843 	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
844 		 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
845 			    IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
846 		 FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
847 		 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
848 		 FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
849 
850 	dma_wmb(); /* make sure WQE is written before valid bit is set */
851 
852 	set_64bit_val(wqe, 24, header);
853 
854 	print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE",
855 			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
856 			     IRDMA_CQP_WQE_SIZE * 8, false);
857 
858 	if (post_sq)
859 		irdma_sc_cqp_post_sq(cqp);
860 	return 0;
861 }
862 
863 /**
864  * irdma_sc_qp_setctx - set qp's context
865  * @qp: sc qp
866  * @qp_ctx: context ptr
867  * @info: ctx info
868  */
869 void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
870 			struct irdma_qp_host_ctx_info *info)
871 {
872 	struct irdma_iwarp_offload_info *iw;
873 	struct irdma_tcp_offload_info *tcp;
874 	struct irdma_sc_dev *dev;
875 	u8 push_mode_en;
876 	u32 push_idx;
877 	u64 qw0, qw3, qw7 = 0, qw16 = 0;
878 	u64 mac = 0;
879 
880 	iw = info->iwarp_info;
881 	tcp = info->tcp_info;
882 	dev = qp->dev;
883 	if (iw->rcv_mark_en) {
884 		qp->pfpdu.marker_len = 4;
885 		qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
886 	}
887 	qp->user_pri = info->user_pri;
888 	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
889 		push_mode_en = 0;
890 		push_idx = 0;
891 	} else {
892 		push_mode_en = 1;
893 		push_idx = qp->push_idx;
894 	}
895 	qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
896 	      FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
897 	      FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
898 	      FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
899 	      FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
900 	      FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
901 	      FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
902 
903 	set_64bit_val(qp_ctx, 8, qp->sq_pa);
904 	set_64bit_val(qp_ctx, 16, qp->rq_pa);
905 
906 	qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
907 	      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
908 	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
909 		qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
910 				  qp->src_mac_addr_idx);
911 	set_64bit_val(qp_ctx, 136,
912 		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
913 		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
914 	set_64bit_val(qp_ctx, 168,
915 		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
916 	set_64bit_val(qp_ctx, 176,
917 		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
918 		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
919 		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
920 		      FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
921 	if (info->iwarp_info_valid) {
922 		qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
923 		       FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
924 		       FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
925 		       FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
926 		       FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
927 		       FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
928 		       FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
929 				  iw->err_rq_idx_valid);
930 		qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
931 		qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
932 			FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
933 		set_64bit_val(qp_ctx, 144,
934 			      FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
935 			      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
936 
937 		if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
938 			mac = ether_addr_to_u64(iw->mac_addr);
939 
940 		set_64bit_val(qp_ctx, 152,
941 			      mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
942 		set_64bit_val(qp_ctx, 160,
943 			      FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
944 			      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
945 			      FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
946 			      FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
947 			      FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
948 			      FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
949 			      FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
950 			      FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
951 			      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
952 			      FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
953 			      FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
954 			      FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
955 			      FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
956 			      FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) |
957 			      FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? iw->snd_mark_offset : tcp->snd_nxt) |
958 			      FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
959 	}
960 	if (info->tcp_info_valid) {
961 		qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
962 		       FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
963 		       FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
964 				  tcp->insert_vlan_tag) |
965 		       FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
966 		       FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
967 		       FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
968 		       FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
969 
970 		if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03))
971 			tcp->tos |= ECN_CODE_PT_VAL;
972 
973 		qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
974 		       FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
975 		       FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
976 		       FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
977 		       FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
978 		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
979 			qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
980 
981 			qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
982 		}
983 		set_64bit_val(qp_ctx, 32,
984 			      FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
985 			      FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
986 		set_64bit_val(qp_ctx, 40,
987 			      FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
988 			      FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
989 		set_64bit_val(qp_ctx, 48,
990 			      FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
991 			      FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
992 			      FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
993 			      FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
994 		qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
995 		       FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
996 		       FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
997 				  tcp->ignore_tcp_opt) |
998 		       FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
999 				  tcp->ignore_tcp_uns_opt) |
1000 		       FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
1001 		       FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
1002 		       FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
1003 		set_64bit_val(qp_ctx, 72,
1004 			      FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
1005 			      FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
1006 		set_64bit_val(qp_ctx, 80,
1007 			      FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
1008 			      FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
1009 		set_64bit_val(qp_ctx, 88,
1010 			      FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
1011 			      FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
1012 		set_64bit_val(qp_ctx, 96,
1013 			      FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
1014 			      FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
1015 		set_64bit_val(qp_ctx, 104,
1016 			      FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
1017 			      FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
1018 		set_64bit_val(qp_ctx, 112,
1019 			      FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
1020 			      FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
1021 		set_64bit_val(qp_ctx, 120,
1022 			      FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
1023 			      FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
1024 		qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
1025 			FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
1026 		set_64bit_val(qp_ctx, 184,
1027 			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
1028 			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
1029 		set_64bit_val(qp_ctx, 192,
1030 			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
1031 			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
1032 		set_64bit_val(qp_ctx, 200,
1033 			      FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
1034 			      FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
1035 		set_64bit_val(qp_ctx, 208,
1036 			      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
1037 	}
1038 
1039 	set_64bit_val(qp_ctx, 0, qw0);
1040 	set_64bit_val(qp_ctx, 24, qw3);
1041 	set_64bit_val(qp_ctx, 56, qw7);
1042 	set_64bit_val(qp_ctx, 128, qw16);
1043 
1044 	print_hex_dump_debug("WQE: QP_HOST CTX", DUMP_PREFIX_OFFSET, 16, 8,
1045 			     qp_ctx, IRDMA_QP_CTX_SIZE, false);
1046 }
1047 
1048 /**
1049  * irdma_sc_alloc_stag - mr stag alloc
1050  * @dev: sc device struct
1051  * @info: stag info
1052  * @scratch: u64 saved to be used during cqp completion
1053  * @post_sq: flag for cqp db to ring
1054  */
1055 static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
1056 			       struct irdma_allocate_stag_info *info,
1057 			       u64 scratch, bool post_sq)
1058 {
1059 	__le64 *wqe;
1060 	struct irdma_sc_cqp *cqp;
1061 	u64 hdr;
1062 	enum irdma_page_size page_size;
1063 
1064 	if (info->page_size == 0x40000000)
1065 		page_size = IRDMA_PAGE_SIZE_1G;
1066 	else if (info->page_size == 0x200000)
1067 		page_size = IRDMA_PAGE_SIZE_2M;
1068 	else
1069 		page_size = IRDMA_PAGE_SIZE_4K;
1070 
1071 	cqp = dev->cqp;
1072 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1073 	if (!wqe)
1074 		return -ENOMEM;
1075 
1076 	set_64bit_val(wqe, 8,
1077 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
1078 		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
1079 	set_64bit_val(wqe, 16,
1080 		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1081 	set_64bit_val(wqe, 40,
1082 		      FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
1083 
1084 	if (info->chunk_size)
1085 		set_64bit_val(wqe, 48,
1086 			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
1087 
1088 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
1089 	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
1090 	      FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
1091 	      FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
1092 	      FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
1093 	      FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
1094 	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
1095 	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
1096 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1097 	dma_wmb(); /* make sure WQE is written before valid bit is set */
1098 
1099 	set_64bit_val(wqe, 24, hdr);
1100 
1101 	print_hex_dump_debug("WQE: ALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8,
1102 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1103 	if (post_sq)
1104 		irdma_sc_cqp_post_sq(cqp);
1105 
1106 	return 0;
1107 }
1108 
1109 /**
1110  * irdma_sc_mr_reg_non_shared - non-shared mr registration
1111  * @dev: sc device struct
1112  * @info: mr info
1113  * @scratch: u64 saved to be used during cqp completion
1114  * @post_sq: flag for cqp db to ring
1115  */
1116 static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
1117 				      struct irdma_reg_ns_stag_info *info,
1118 				      u64 scratch, bool post_sq)
1119 {
1120 	__le64 *wqe;
1121 	u64 fbo;
1122 	struct irdma_sc_cqp *cqp;
1123 	u64 hdr;
1124 	u32 pble_obj_cnt;
1125 	bool remote_access;
1126 	u8 addr_type;
1127 	enum irdma_page_size page_size;
1128 
1129 	if (info->page_size == 0x40000000)
1130 		page_size = IRDMA_PAGE_SIZE_1G;
1131 	else if (info->page_size == 0x200000)
1132 		page_size = IRDMA_PAGE_SIZE_2M;
1133 	else if (info->page_size == 0x1000)
1134 		page_size = IRDMA_PAGE_SIZE_4K;
1135 	else
1136 		return -EINVAL;
1137 
1138 	if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
1139 				   IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
1140 		remote_access = true;
1141 	else
1142 		remote_access = false;
1143 
1144 	pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
1145 	if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
1146 		return -EINVAL;
1147 
1148 	cqp = dev->cqp;
1149 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1150 	if (!wqe)
1151 		return -ENOMEM;
1152 	fbo = info->va & (info->page_size - 1);
1153 
1154 	set_64bit_val(wqe, 0,
1155 		      (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
1156 		      info->va : fbo));
1157 	set_64bit_val(wqe, 8,
1158 		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
1159 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1160 	set_64bit_val(wqe, 16,
1161 		      FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
1162 		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1163 	if (!info->chunk_size) {
1164 		set_64bit_val(wqe, 32, info->reg_addr_pa);
1165 		set_64bit_val(wqe, 48, 0);
1166 	} else {
1167 		set_64bit_val(wqe, 32, 0);
1168 		set_64bit_val(wqe, 48,
1169 			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
1170 	}
1171 	set_64bit_val(wqe, 40, info->hmc_fcn_index);
1172 	set_64bit_val(wqe, 56, 0);
1173 
1174 	addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
1175 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
1176 	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
1177 	      FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
1178 	      FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
1179 	      FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
1180 	      FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
1181 	      FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
1182 	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
1183 	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
1184 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1185 	dma_wmb(); /* make sure WQE is written before valid bit is set */
1186 
1187 	set_64bit_val(wqe, 24, hdr);
1188 
1189 	print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8,
1190 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1191 	if (post_sq)
1192 		irdma_sc_cqp_post_sq(cqp);
1193 
1194 	return 0;
1195 }
1196 
1197 /**
1198  * irdma_sc_dealloc_stag - deallocate stag
1199  * @dev: sc device struct
1200  * @info: dealloc stag info
1201  * @scratch: u64 saved to be used during cqp completion
1202  * @post_sq: flag for cqp db to ring
1203  */
1204 static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
1205 				 struct irdma_dealloc_stag_info *info,
1206 				 u64 scratch, bool post_sq)
1207 {
1208 	u64 hdr;
1209 	__le64 *wqe;
1210 	struct irdma_sc_cqp *cqp;
1211 
1212 	cqp = dev->cqp;
1213 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1214 	if (!wqe)
1215 		return -ENOMEM;
1216 
1217 	set_64bit_val(wqe, 8,
1218 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1219 	set_64bit_val(wqe, 16,
1220 		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1221 
1222 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
1223 	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
1224 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1225 	dma_wmb(); /* make sure WQE is written before valid bit is set */
1226 
1227 	set_64bit_val(wqe, 24, hdr);
1228 
1229 	print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16,
1230 			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1231 	if (post_sq)
1232 		irdma_sc_cqp_post_sq(cqp);
1233 
1234 	return 0;
1235 }
1236 
1237 /**
1238  * irdma_sc_mw_alloc - mw allocate
1239  * @dev: sc device struct
1240  * @info: memory window allocation information
1241  * @scratch: u64 saved to be used during cqp completion
1242  * @post_sq: flag for cqp db to ring
1243  */
1244 static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
1245 			     struct irdma_mw_alloc_info *info, u64 scratch,
1246 			     bool post_sq)
1247 {
1248 	u64 hdr;
1249 	struct irdma_sc_cqp *cqp;
1250 	__le64 *wqe;
1251 
1252 	cqp = dev->cqp;
1253 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1254 	if (!wqe)
1255 		return -ENOMEM;
1256 
1257 	set_64bit_val(wqe, 8,
1258 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1259 	set_64bit_val(wqe, 16,
1260 		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
1261 
1262 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
1263 	      FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
1264 	      FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
1265 			 info->mw1_bind_dont_vldt_key) |
1266 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1267 	dma_wmb(); /* make sure WQE is written before valid bit is set */
1268 
1269 	set_64bit_val(wqe, 24, hdr);
1270 
1271 	print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8,
1272 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
1273 	if (post_sq)
1274 		irdma_sc_cqp_post_sq(cqp);
1275 
1276 	return 0;
1277 }
1278 
1279 /**
1280  * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
1281  * @qp: sc qp struct
1282  * @info: fast mr info
1283  * @post_sq: flag for cqp db to ring
1284  */
1285 int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
1286 			      struct irdma_fast_reg_stag_info *info,
1287 			      bool post_sq)
1288 {
1289 	u64 temp, hdr;
1290 	__le64 *wqe;
1291 	u32 wqe_idx;
1292 	enum irdma_page_size page_size;
1293 	struct irdma_post_sq_info sq_info = {};
1294 
1295 	if (info->page_size == 0x40000000)
1296 		page_size = IRDMA_PAGE_SIZE_1G;
1297 	else if (info->page_size == 0x200000)
1298 		page_size = IRDMA_PAGE_SIZE_2M;
1299 	else
1300 		page_size = IRDMA_PAGE_SIZE_4K;
1301 
1302 	sq_info.wr_id = info->wr_id;
1303 	sq_info.signaled = info->signaled;
1304 	sq_info.push_wqe = info->push_wqe;
1305 
1306 	wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
1307 					 IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
1308 	if (!wqe)
1309 		return -ENOMEM;
1310 
1311 	irdma_clr_wqes(&qp->qp_uk, wqe_idx);
1312 
1313 	ibdev_dbg(to_ibdev(qp->dev),
1314 		  "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
1315 		  info->wr_id, wqe_idx,
1316 		  &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
1317 
1318 	temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
1319 		(uintptr_t)info->va : info->fbo;
1320 	set_64bit_val(wqe, 0, temp);
1321 
1322 	temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
1323 			 info->first_pm_pbl_index >> 16);
1324 	set_64bit_val(wqe, 8,
1325 		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
1326 		      FIELD_PREP(IRDMAQPSQ_PBLADDR >> IRDMA_HW_PAGE_SHIFT, info->reg_addr_pa));
1327 	set_64bit_val(wqe, 16,
1328 		      info->total_len |
1329 		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
1330 
1331 	hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
1332 	      FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
1333 	      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
1334 	      FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
1335 	      FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
1336 	      FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
1337 	      FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
1338 	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
1339 	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
1340 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
1341 	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
1342 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1343 	dma_wmb(); /* make sure WQE is written before valid bit is set */
1344 
1345 	set_64bit_val(wqe, 24, hdr);
1346 
1347 	print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
1348 			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1349 	if (sq_info.push_wqe) {
1350 		irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA,
1351 				  wqe_idx, post_sq);
1352 	} else {
1353 		if (post_sq)
1354 			irdma_uk_qp_post_wr(&qp->qp_uk);
1355 	}
1356 
1357 	return 0;
1358 }
1359 
1360 /**
1361  * irdma_sc_gen_rts_ae - request AE generated after RTS
1362  * @qp: sc qp struct
1363  */
1364 static void irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
1365 {
1366 	__le64 *wqe;
1367 	u64 hdr;
1368 	struct irdma_qp_uk *qp_uk;
1369 
1370 	qp_uk = &qp->qp_uk;
1371 
1372 	wqe = qp_uk->sq_base[1].elem;
1373 
1374 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
1375 	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
1376 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1377 	dma_wmb(); /* make sure WQE is written before valid bit is set */
1378 
1379 	set_64bit_val(wqe, 24, hdr);
1380 	print_hex_dump_debug("QP: NOP W/LOCAL FENCE WQE", DUMP_PREFIX_OFFSET,
1381 			     16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1382 
1383 	wqe = qp_uk->sq_base[2].elem;
1384 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
1385 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1386 	dma_wmb(); /* make sure WQE is written before valid bit is set */
1387 
1388 	set_64bit_val(wqe, 24, hdr);
1389 	print_hex_dump_debug("QP: CONN EST WQE", DUMP_PREFIX_OFFSET, 16, 8,
1390 			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1391 }
1392 
1393 /**
1394  * irdma_sc_send_lsmm - send last streaming mode message
1395  * @qp: sc qp struct
1396  * @lsmm_buf: buffer with lsmm message
1397  * @size: size of lsmm buffer
1398  * @stag: stag of lsmm buffer
1399  */
1400 void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
1401 			irdma_stag stag)
1402 {
1403 	__le64 *wqe;
1404 	u64 hdr;
1405 	struct irdma_qp_uk *qp_uk;
1406 
1407 	qp_uk = &qp->qp_uk;
1408 	wqe = qp_uk->sq_base->elem;
1409 
1410 	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
1411 	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1412 		set_64bit_val(wqe, 8,
1413 			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
1414 			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
1415 	} else {
1416 		set_64bit_val(wqe, 8,
1417 			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
1418 			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
1419 			      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1420 	}
1421 	set_64bit_val(wqe, 16, 0);
1422 
1423 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
1424 	      FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
1425 	      FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
1426 	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1427 	dma_wmb(); /* make sure WQE is written before valid bit is set */
1428 
1429 	set_64bit_val(wqe, 24, hdr);
1430 
1431 	print_hex_dump_debug("WQE: SEND_LSMM WQE", DUMP_PREFIX_OFFSET, 16, 8,
1432 			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
1433 
1434 	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
1435 		irdma_sc_gen_rts_ae(qp);
1436 }
1437 
1438 /**
1439  * irdma_sc_send_rtt - send last read0 or write0
1440  * @qp: sc qp struct
1441  * @read: Do read0 or write0
1442  */
1443 void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
1444 {
1445 	__le64 *wqe;
1446 	u64 hdr;
1447 	struct irdma_qp_uk *qp_uk;
1448 
1449 	qp_uk = &qp->qp_uk;
1450 	wqe = qp_uk->sq_base->elem;
1451 
1452 	set_64bit_val(wqe, 0, 0);
1453 	set_64bit_val(wqe, 16, 0);
1454 	if (read) {
1455 		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1456 			set_64bit_val(wqe, 8,
1457 				      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
1458 		} else {
1459 			set_64bit_val(wqe, 8,
1460 				      (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1461 		}
1462 		hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
1463 		      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
1464 		      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1465 
1466 	} else {
1467 		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1468 			set_64bit_val(wqe, 8, 0);
1469 		} else {
1470 			set_64bit_val(wqe, 8,
1471 				      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1472 		}
1473 		hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
1474 		      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1475 	}
1476 
1477 	dma_wmb(); /* make sure WQE is written before valid bit is set */
1478 
1479 	set_64bit_val(wqe, 24, hdr);
1480 
1481 	print_hex_dump_debug("WQE: RTR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
1482 			     IRDMA_QP_WQE_MIN_SIZE, false);
1483 
1484 	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
1485 		irdma_sc_gen_rts_ae(qp);
1486 }
1487 
1488 /**
1489  * irdma_iwarp_opcode - determine the rdma layer opcode of an incoming packet
1490  * @info: aeq info for the packet
1491  * @pkt: packet for error
1492  */
1493 static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
1494 {
1495 	__be16 *mpa;
1496 	u32 opcode = 0xffffffff;
1497 
1498 	if (info->q2_data_written) {
1499 		mpa = (__be16 *)pkt;
1500 		opcode = ntohs(mpa[1]) & 0xf;
1501 	}
1502 
1503 	return opcode;
1504 }
1505 
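/*
 * Note: mpa[1] above spans the DDP and RDMAP control bytes that follow the
 * 2-byte MPA length field; the low nibble of the RDMAP control byte is the
 * opcode (e.g. 0x0 RDMA Write, 0x1 RDMA Read Request, per RFC 5040).
 */
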
1506 /**
1507  * irdma_locate_mpa - return pointer to mpa in the pkt
1508  * @pkt: packet with data
1509  */
1510 static u8 *irdma_locate_mpa(u8 *pkt)
1511 {
1512 	/* skip over ethernet header */
1513 	pkt += IRDMA_MAC_HLEN;
1514 
1515 	/* Skip over IP and TCP headers */
1516 	pkt += 4 * (pkt[0] & 0x0f);
1517 	pkt += 4 * ((pkt[12] >> 4) & 0x0f);
1518 
1519 	return pkt;
1520 }
1521 
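/*
 * Worked example: after the ethernet header, pkt[0] & 0x0f is the IPv4 IHL
 * in 32-bit words (5 for a 20-byte header) and pkt[12] >> 4 is the TCP data
 * offset (5 for a 20-byte header), so the MPA header of a typical frame
 * starts IRDMA_MAC_HLEN + 20 + 20 bytes into the packet.
 */
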
1522 /**
1523  * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
1524  * @qp: sc qp ptr for pkt
1525  * @hdr: term hdr
1526  * @opcode: flush opcode for termhdr
1527  * @layer_etype: error layer + error type
1528  * @err: error code in the header
1529  */
1530 static void irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
1531 				   struct irdma_terminate_hdr *hdr,
1532 				   enum irdma_flush_opcode opcode,
1533 				   u8 layer_etype, u8 err)
1534 {
1535 	qp->flush_code = opcode;
1536 	hdr->layer_etype = layer_etype;
1537 	hdr->error_code = err;
1538 }
1539 
1540 /**
1541  * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
1542  * @pkt: ptr to mpa in offending pkt
1543  * @hdr: term hdr
1544  * @copy_len: offending pkt length to be copied to term hdr
1545  * @is_tagged: DDP tagged or untagged
1546  */
1547 static void irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
1548 				       int *copy_len, u8 *is_tagged)
1549 {
1550 	u16 ddp_seg_len;
1551 
1552 	ddp_seg_len = ntohs(*(__be16 *)pkt);
1553 	if (ddp_seg_len) {
1554 		*copy_len = 2;
1555 		hdr->hdrct = DDP_LEN_FLAG;
1556 		if (pkt[2] & 0x80) {
1557 			*is_tagged = 1;
1558 			if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
1559 				*copy_len += TERM_DDP_LEN_TAGGED;
1560 				hdr->hdrct |= DDP_HDR_FLAG;
1561 			}
1562 		} else {
1563 			if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
1564 				*copy_len += TERM_DDP_LEN_UNTAGGED;
1565 				hdr->hdrct |= DDP_HDR_FLAG;
1566 			}
1567 			if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
1568 			    ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
1569 				*copy_len += TERM_RDMA_LEN;
1570 				hdr->hdrct |= RDMA_HDR_FLAG;
1571 			}
1572 		}
1573 	}
1574 }
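
/*
 * Summary of the sizes computed above: copy_len always begins with the
 * 2-byte DDP segment length, then grows by TERM_DDP_LEN_TAGGED for tagged
 * buffers or TERM_DDP_LEN_UNTAGGED for untagged ones, plus TERM_RDMA_LEN
 * more for an RDMA READ request; hdr->hdrct flags record which pieces are
 * present.
 */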
1575 
1576 /**
1577  * irdma_bld_terminate_hdr - build terminate message header
1578  * @qp: qp associated with received terminate AE
1579  * @info: the struct containing AE information
1580  */
1581 static int irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
1582 				   struct irdma_aeqe_info *info)
1583 {
1584 	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
1585 	int copy_len = 0;
1586 	u8 is_tagged = 0;
1587 	u32 opcode;
1588 	struct irdma_terminate_hdr *termhdr;
1589 
1590 	termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
1591 	memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
1592 
1593 	if (info->q2_data_written) {
1594 		pkt = irdma_locate_mpa(pkt);
1595 		irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
1596 	}
1597 
1598 	opcode = irdma_iwarp_opcode(info, pkt);
1599 	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
1600 	qp->sq_flush_code = info->sq;
1601 	qp->rq_flush_code = info->rq;
1602 
1603 	switch (info->ae_id) {
1604 	case IRDMA_AE_AMP_UNALLOCATED_STAG:
1605 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1606 		if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
1607 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1608 					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1609 					       DDP_TAGGED_INV_STAG);
1610 		else
1611 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1612 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1613 					       RDMAP_INV_STAG);
1614 		break;
1615 	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
1616 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1617 		if (info->q2_data_written)
1618 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1619 					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1620 					       DDP_TAGGED_BOUNDS);
1621 		else
1622 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1623 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1624 					       RDMAP_INV_BOUNDS);
1625 		break;
1626 	case IRDMA_AE_AMP_BAD_PD:
1627 		switch (opcode) {
1628 		case IRDMA_OP_TYPE_RDMA_WRITE:
1629 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1630 					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1631 					       DDP_TAGGED_UNASSOC_STAG);
1632 			break;
1633 		case IRDMA_OP_TYPE_SEND_INV:
1634 		case IRDMA_OP_TYPE_SEND_SOL_INV:
1635 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1636 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1637 					       RDMAP_CANT_INV_STAG);
1638 			break;
1639 		default:
1640 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1641 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1642 					       RDMAP_UNASSOC_STAG);
1643 		}
1644 		break;
1645 	case IRDMA_AE_AMP_INVALID_STAG:
1646 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1647 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1648 				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1649 				       RDMAP_INV_STAG);
1650 		break;
1651 	case IRDMA_AE_AMP_BAD_QP:
1652 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
1653 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1654 				       DDP_UNTAGGED_INV_QN);
1655 		break;
1656 	case IRDMA_AE_AMP_BAD_STAG_KEY:
1657 	case IRDMA_AE_AMP_BAD_STAG_INDEX:
1658 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1659 		switch (opcode) {
1660 		case IRDMA_OP_TYPE_SEND_INV:
1661 		case IRDMA_OP_TYPE_SEND_SOL_INV:
1662 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
1663 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1664 					       RDMAP_CANT_INV_STAG);
1665 			break;
1666 		default:
1667 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1668 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1669 					       RDMAP_INV_STAG);
1670 		}
1671 		break;
1672 	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
1673 	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
1674 	case IRDMA_AE_PRIV_OPERATION_DENIED:
1675 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1676 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1677 				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1678 				       RDMAP_ACCESS);
1679 		break;
1680 	case IRDMA_AE_AMP_TO_WRAP:
1681 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1682 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1683 				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1684 				       RDMAP_TO_WRAP);
1685 		break;
1686 	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
1687 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1688 				       (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
1689 		break;
1690 	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
1691 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
1692 				       (LAYER_DDP << 4) | DDP_CATASTROPHIC,
1693 				       DDP_CATASTROPHIC_LOCAL);
1694 		break;
1695 	case IRDMA_AE_LCE_QP_CATASTROPHIC:
1696 	case IRDMA_AE_DDP_NO_L_BIT:
1697 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
1698 				       (LAYER_DDP << 4) | DDP_CATASTROPHIC,
1699 				       DDP_CATASTROPHIC_LOCAL);
1700 		break;
1701 	case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
1702 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1703 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1704 				       DDP_UNTAGGED_INV_MSN_RANGE);
1705 		break;
1706 	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
1707 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1708 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
1709 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1710 				       DDP_UNTAGGED_INV_TOO_LONG);
1711 		break;
1712 	case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
1713 		if (is_tagged)
1714 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1715 					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1716 					       DDP_TAGGED_INV_DDP_VER);
1717 		else
1718 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1719 					       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1720 					       DDP_UNTAGGED_INV_DDP_VER);
1721 		break;
1722 	case IRDMA_AE_DDP_UBE_INVALID_MO:
1723 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1724 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1725 				       DDP_UNTAGGED_INV_MO);
1726 		break;
1727 	case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
1728 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
1729 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1730 				       DDP_UNTAGGED_INV_MSN_NO_BUF);
1731 		break;
1732 	case IRDMA_AE_DDP_UBE_INVALID_QN:
1733 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1734 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1735 				       DDP_UNTAGGED_INV_QN);
1736 		break;
1737 	case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
1738 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1739 				       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1740 				       RDMAP_INV_RDMAP_VER);
1741 		break;
1742 	default:
1743 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
1744 				       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1745 				       RDMAP_UNSPECIFIED);
1746 		break;
1747 	}
1748 
1749 	if (copy_len)
1750 		memcpy(termhdr + 1, pkt, copy_len);
1751 
1752 	return sizeof(struct irdma_terminate_hdr) + copy_len;
1753 }
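
/*
 * Resulting terminate message layout in q2_buf (sketch):
 *
 *   +------------------------------+  <- qp->q2_buf
 *   | struct irdma_terminate_hdr   |
 *   +------------------------------+  <- termhdr + 1
 *   | copy_len bytes of offending  |
 *   | DDP/RDMAP headers, if any    |
 *   +------------------------------+
 *
 * The returned length covers both pieces.
 */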
1754 
1755 /**
1756  * irdma_terminate_send_fin() - Send fin for terminate message
1757  * @qp: qp associated with received terminate AE
1758  */
1759 void irdma_terminate_send_fin(struct irdma_sc_qp *qp)
1760 {
1761 	irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
1762 			     IRDMAQP_TERM_SEND_FIN_ONLY, 0);
1763 }
1764 
1765 /**
1766  * irdma_terminate_connection() - Bad AE and send terminate to remote QP
1767  * @qp: qp associated with received terminate AE
1768  * @info: the struct containing AE information
1769  */
1770 void irdma_terminate_connection(struct irdma_sc_qp *qp,
1771 				struct irdma_aeqe_info *info)
1772 {
1773 	u8 termlen = 0;
1774 
1775 	if (qp->term_flags & IRDMA_TERM_SENT)
1776 		return;
1777 
1778 	termlen = irdma_bld_terminate_hdr(qp, info);
1779 	irdma_terminate_start_timer(qp);
1780 	qp->term_flags |= IRDMA_TERM_SENT;
1781 	irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
1782 			     IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
1783 }
1784 
1785 /**
1786  * irdma_terminate_received - handle terminate received AE
1787  * @qp: qp associated with received terminate AE
1788  * @info: the struct containing AE information
1789  */
1790 void irdma_terminate_received(struct irdma_sc_qp *qp,
1791 			      struct irdma_aeqe_info *info)
1792 {
1793 	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
1794 	__be32 *mpa;
1795 	u8 ddp_ctl;
1796 	u8 rdma_ctl;
1797 	u16 aeq_id = 0;
1798 	struct irdma_terminate_hdr *termhdr;
1799 
1800 	mpa = (__be32 *)irdma_locate_mpa(pkt);
1801 	if (info->q2_data_written) {
1802 		/* did not validate the frame - do it now */
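		/*
		 * Expected values (per the DDP/RDMAP terminate rules in RFC
		 * 5040/5041): an untagged, last DDP segment (T = 0, L = 1),
		 * DDP version 1, queue number 2, MSN 1, MO 0 and RDMAP
		 * version 1; any mismatch selects the matching AE code.
		 */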
1803 		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
1804 		rdma_ctl = ntohl(mpa[0]) & 0xff;
1805 		if ((ddp_ctl & 0xc0) != 0x40)
1806 			aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
1807 		else if ((ddp_ctl & 0x03) != 1)
1808 			aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
1809 		else if (ntohl(mpa[2]) != 2)
1810 			aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
1811 		else if (ntohl(mpa[3]) != 1)
1812 			aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
1813 		else if (ntohl(mpa[4]) != 0)
1814 			aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
1815 		else if ((rdma_ctl & 0xc0) != 0x40)
1816 			aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
1817 
1818 		info->ae_id = aeq_id;
1819 		if (info->ae_id) {
1820 			/* Bad terminate recvd - send back a terminate */
1821 			irdma_terminate_connection(qp, info);
1822 			return;
1823 		}
1824 	}
1825 
1826 	qp->term_flags |= IRDMA_TERM_RCVD;
1827 	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
1828 	termhdr = (struct irdma_terminate_hdr *)&mpa[5];
1829 	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
1830 	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
1831 		irdma_terminate_done(qp, 0);
1832 	} else {
1833 		irdma_terminate_start_timer(qp);
1834 		irdma_terminate_send_fin(qp);
1835 	}
1836 }
1837 
1838 static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
1839 {
1840 	return 0;
1841 }
1842 
1843 static void irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
1844 {
1845 	/* do nothing */
1846 }
1847 
1848 static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
1849 {
1850 	/* do nothing */
1851 }
1852 
1853 /**
1854  * irdma_sc_vsi_init - Init the vsi structure
1855  * @vsi: pointer to vsi structure to initialize
1856  * @info: the info used to initialize the vsi struct
1857  */
1858 void irdma_sc_vsi_init(struct irdma_sc_vsi  *vsi,
1859 		       struct irdma_vsi_init_info *info)
1860 {
1861 	int i;
1862 
1863 	vsi->dev = info->dev;
1864 	vsi->back_vsi = info->back_vsi;
1865 	vsi->register_qset = info->register_qset;
1866 	vsi->unregister_qset = info->unregister_qset;
1867 	vsi->mtu = info->params->mtu;
1868 	vsi->exception_lan_q = info->exception_lan_q;
1869 	vsi->vsi_idx = info->pf_data_vsi_num;
1870 
1871 	irdma_set_qos_info(vsi, info->params);
1872 	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
1873 		mutex_init(&vsi->qos[i].qos_mutex);
1874 		INIT_LIST_HEAD(&vsi->qos[i].qplist);
1875 	}
1876 	if (vsi->register_qset) {
1877 		vsi->dev->ws_add = irdma_ws_add;
1878 		vsi->dev->ws_remove = irdma_ws_remove;
1879 		vsi->dev->ws_reset = irdma_ws_reset;
1880 	} else {
1881 		vsi->dev->ws_add = irdma_null_ws_add;
1882 		vsi->dev->ws_remove = irdma_null_ws_remove;
1883 		vsi->dev->ws_reset = irdma_null_ws_reset;
1884 	}
1885 }
1886 
1887 /**
1888  * irdma_get_stats_idx - Return stats index
1889  * @vsi: pointer to the vsi
1890  */
1891 static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
1892 {
1893 	struct irdma_stats_inst_info stats_info = {};
1894 	struct irdma_sc_dev *dev = vsi->dev;
1895 	u8 i;
1896 
1897 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1898 		if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
1899 					      &stats_info))
1900 			return stats_info.stats_idx;
1901 	}
1902 
1903 	for (i = 0; i < IRDMA_MAX_STATS_COUNT_GEN_1; i++) {
1904 		if (!dev->stats_idx_array[i]) {
1905 			dev->stats_idx_array[i] = true;
1906 			return i;
1907 		}
1908 	}
1909 
1910 	return IRDMA_INVALID_STATS_IDX;
1911 }
1912 
1913 /**
1914  * irdma_hw_stats_init_gen1 - Initialize stat reg table used for gen1
1915  * @vsi: vsi structure where hw_regs are set
1916  *
1917  * Populate the HW stats table
1918  */
1919 static void irdma_hw_stats_init_gen1(struct irdma_sc_vsi *vsi)
1920 {
1921 	struct irdma_sc_dev *dev = vsi->dev;
1922 	const struct irdma_hw_stat_map *map;
1923 	u64 *stat_reg = vsi->hw_stats_regs;
1924 	u64 *regs = dev->hw_stats_regs;
1925 	u16 i, stats_reg_set = vsi->stats_idx;
1926 
1927 	map = dev->hw_stats_map;
1928 
1929 	/* First 4 stat instances are reserved for port level statistics. */
1930 	stats_reg_set += vsi->stats_inst_alloc ? IRDMA_FIRST_NON_PF_STAT : 0;
1931 
1932 	for (i = 0; i < dev->hw_attrs.max_stat_idx; i++) {
1933 		if (map[i].bitmask <= IRDMA_MAX_STATS_32)
1934 			stat_reg[i] = regs[i] + stats_reg_set * sizeof(u32);
1935 		else
1936 			stat_reg[i] = regs[i] + stats_reg_set * sizeof(u64);
1937 	}
1938 }
1939 
1940 /**
1941  * irdma_vsi_stats_init - Initialize the vsi statistics
1942  * @vsi: pointer to the vsi structure
1943  * @info: The info structure used for initialization
1944  */
1945 int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
1946 			 struct irdma_vsi_stats_info *info)
1947 {
1948 	struct irdma_dma_mem *stats_buff_mem;
1949 
1950 	vsi->pestat = info->pestat;
1951 	vsi->pestat->hw = vsi->dev->hw;
1952 	vsi->pestat->vsi = vsi;
1953 	stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
1954 	stats_buff_mem->size = ALIGN(IRDMA_GATHER_STATS_BUF_SIZE * 2, 1);
1955 	stats_buff_mem->va = dma_alloc_coherent(vsi->pestat->hw->device,
1956 						stats_buff_mem->size,
1957 						&stats_buff_mem->pa,
1958 						GFP_KERNEL);
1959 	if (!stats_buff_mem->va)
1960 		return -ENOMEM;
1961 
1962 	vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
1963 	vsi->pestat->gather_info.last_gather_stats_va =
1964 		(void *)((uintptr_t)stats_buff_mem->va +
1965 			 IRDMA_GATHER_STATS_BUF_SIZE);
1966 
1967 	irdma_hw_stats_start_timer(vsi);
1968 
1969 	/* when stat allocation is not required default to fcn_id. */
1970 	vsi->stats_idx = info->fcn_id;
1971 	if (info->alloc_stats_inst) {
1972 		u8 stats_idx = irdma_get_stats_idx(vsi);
1973 
1974 		if (stats_idx != IRDMA_INVALID_STATS_IDX) {
1975 			vsi->stats_inst_alloc = true;
1976 			vsi->stats_idx = stats_idx;
1977 			vsi->pestat->gather_info.use_stats_inst = true;
1978 			vsi->pestat->gather_info.stats_inst_index = stats_idx;
1979 		}
1980 	}
1981 
1982 	if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1983 		irdma_hw_stats_init_gen1(vsi);
1984 
1985 	return 0;
1986 }
1987 
1988 /**
1989  * irdma_vsi_stats_free - Free the vsi stats
1990  * @vsi: pointer to the vsi structure
1991  */
1992 void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
1993 {
1994 	struct irdma_stats_inst_info stats_info = {};
1995 	struct irdma_sc_dev *dev = vsi->dev;
1996 	u8 stats_idx = vsi->stats_idx;
1997 
1998 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1999 		if (vsi->stats_inst_alloc) {
2000 			stats_info.stats_idx = vsi->stats_idx;
2001 			irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
2002 						 &stats_info);
2003 		}
2004 	} else {
2005 		if (vsi->stats_inst_alloc &&
2006 		    stats_idx < vsi->dev->hw_attrs.max_stat_inst)
2007 			vsi->dev->stats_idx_array[stats_idx] = false;
2008 	}
2009 
2010 	if (!vsi->pestat)
2011 		return;
2012 	irdma_hw_stats_stop_timer(vsi);
2013 	dma_free_coherent(vsi->pestat->hw->device,
2014 			  vsi->pestat->gather_info.stats_buff_mem.size,
2015 			  vsi->pestat->gather_info.stats_buff_mem.va,
2016 			  vsi->pestat->gather_info.stats_buff_mem.pa);
2017 	vsi->pestat->gather_info.stats_buff_mem.va = NULL;
2018 }
2019 
2020 /**
2021  * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
2022  * @wqsize: size of the wq (sq or rq) to encode
2023  * @queue_type: queue type selected for the calculation algorithm
2024  */
2025 u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
2026 {
2027 	u8 encoded_size = 0;
2028 
2029 	/* cqp sq's hw coded value starts from 1 for size of 4
2030 	 * while it starts from 0 for qp wqs.
2031 	 */
2032 	if (queue_type == IRDMA_QUEUE_TYPE_CQP)
2033 		encoded_size = 1;
2034 	wqsize >>= 2;
2035 	while (wqsize >>= 1)
2036 		encoded_size++;
2037 
2038 	return encoded_size;
2039 }
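
/*
 * Worked example (illustrative): wqsize = 2048 gives 2048 >> 2 = 512, which
 * takes nine right shifts to reach 1, so the encoded size is 9 for a qp wq
 * and 10 for the cqp sq; the minimum wqsize of 4 encodes to 0 and 1
 * respectively.
 */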
2040 
2041 /**
2042  * irdma_sc_gather_stats - collect the statistics
2043  * @cqp: struct for cqp hw
2044  * @info: gather stats info structure
2045  * @scratch: u64 saved to be used during cqp completion
2046  */
2047 static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
2048 				 struct irdma_stats_gather_info *info,
2049 				 u64 scratch)
2050 {
2051 	__le64 *wqe;
2052 	u64 temp;
2053 
2054 	if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
2055 		return -ENOMEM;
2056 
2057 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2058 	if (!wqe)
2059 		return -ENOMEM;
2060 
2061 	set_64bit_val(wqe, 40,
2062 		      FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
2063 	set_64bit_val(wqe, 32, info->stats_buff_mem.pa);
2064 
2065 	temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
2066 	       FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
2067 	       FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
2068 			  info->stats_inst_index) |
2069 	       FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
2070 			  info->use_hmc_fcn_index) |
2071 	       FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
2072 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2073 
2074 	set_64bit_val(wqe, 24, temp);
2075 
2076 	print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET,
2077 			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2078 
2079 	irdma_sc_cqp_post_sq(cqp);
2080 	ibdev_dbg(to_ibdev(cqp->dev),
2081 		  "STATS: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
2082 		  cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
2083 
2084 	return 0;
2085 }
2086 
2087 /**
2088  * irdma_sc_manage_stats_inst - allocate or free stats instance
2089  * @cqp: struct for cqp hw
2090  * @info: stats info structure
2091  * @alloc: alloc vs. delete flag
2092  * @scratch: u64 saved to be used during cqp completion
2093  */
2094 static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
2095 				      struct irdma_stats_inst_info *info,
2096 				      bool alloc, u64 scratch)
2097 {
2098 	__le64 *wqe;
2099 	u64 temp;
2100 
2101 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2102 	if (!wqe)
2103 		return -ENOMEM;
2104 
2105 	set_64bit_val(wqe, 40,
2106 		      FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
2107 	temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
2108 	       FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
2109 	       FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
2110 			  info->use_hmc_fcn_index) |
2111 	       FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
2112 	       FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
2113 
2114 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2115 
2116 	set_64bit_val(wqe, 24, temp);
2117 
2118 	print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16,
2119 			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2120 
2121 	irdma_sc_cqp_post_sq(cqp);
2122 	return 0;
2123 }
2124 
2125 /**
2126  * irdma_sc_set_up_map - set the up map table
2127  * @cqp: struct for cqp hw
2128  * @info: User priority map info
2129  * @scratch: u64 saved to be used during cqp completion
2130  */
2131 static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
2132 			       struct irdma_up_info *info, u64 scratch)
2133 {
2134 	__le64 *wqe;
2135 	u64 temp = 0;
2136 	int i;
2137 
2138 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2139 	if (!wqe)
2140 		return -ENOMEM;
2141 
2142 	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
2143 		temp |= (u64)info->map[i] << (i * 8);
2144 
2145 	set_64bit_val(wqe, 0, temp);
2146 	set_64bit_val(wqe, 40,
2147 		      FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
2148 		      FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
2149 
2150 	temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
2151 	       FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
2152 	       FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
2153 			  info->use_cnp_up_override) |
2154 	       FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
2155 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2156 
2157 	set_64bit_val(wqe, 24, temp);
2158 
2159 	print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
2160 			     IRDMA_CQP_WQE_SIZE * 8, false);
2161 	irdma_sc_cqp_post_sq(cqp);
2162 
2163 	return 0;
2164 }
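
/*
 * Illustrative packing of the map written at offset 0 above: each of the
 * IRDMA_MAX_USER_PRIORITY entries occupies one byte of the u64, so an
 * identity map {0, 1, 2, 3, 4, 5, 6, 7} becomes 0x0706050403020100.
 */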
2165 
2166 /**
2167  * irdma_sc_manage_ws_node - create/modify/destroy WS node
2168  * @cqp: struct for cqp hw
2169  * @info: node info structure
2170  * @node_op: 0 for add, 1 for modify, 2 for delete
2171  * @scratch: u64 saved to be used during cqp completion
2172  */
2173 static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
2174 				   struct irdma_ws_node_info *info,
2175 				   enum irdma_ws_node_op node_op, u64 scratch)
2176 {
2177 	__le64 *wqe;
2178 	u64 temp = 0;
2179 
2180 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2181 	if (!wqe)
2182 		return -ENOMEM;
2183 
2184 	set_64bit_val(wqe, 32,
2185 		      FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
2186 		      FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
2187 
2188 	temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
2189 	       FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
2190 	       FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
2191 	       FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
2192 	       FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
2193 	       FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
2194 	       FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
2195 	       FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
2196 	       FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
2197 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2198 
2199 	set_64bit_val(wqe, 24, temp);
2200 
2201 	print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8,
2202 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2203 	irdma_sc_cqp_post_sq(cqp);
2204 
2205 	return 0;
2206 }
2207 
2208 /**
2209  * irdma_sc_qp_flush_wqes - flush qp's wqe
2210  * @qp: sc qp
2211  * @info: dlush information
2212  * @scratch: u64 saved to be used during cqp completion
2213  * @post_sq: flag for cqp db to ring
2214  */
2215 int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
2216 			   struct irdma_qp_flush_info *info, u64 scratch,
2217 			   bool post_sq)
2218 {
2219 	u64 temp = 0;
2220 	__le64 *wqe;
2221 	struct irdma_sc_cqp *cqp;
2222 	u64 hdr;
2223 	bool flush_sq = false, flush_rq = false;
2224 
2225 	if (info->rq && !qp->flush_rq)
2226 		flush_rq = true;
2227 	if (info->sq && !qp->flush_sq)
2228 		flush_sq = true;
2229 	qp->flush_sq |= flush_sq;
2230 	qp->flush_rq |= flush_rq;
2231 
2232 	if (!flush_sq && !flush_rq) {
2233 		ibdev_dbg(to_ibdev(qp->dev),
2234 			  "CQP: Additional flush request ignored for qp %x\n",
2235 			  qp->qp_uk.qp_id);
2236 		return -EALREADY;
2237 	}
2238 
2239 	cqp = qp->pd->dev->cqp;
2240 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2241 	if (!wqe)
2242 		return -ENOMEM;
2243 
2244 	if (info->userflushcode) {
2245 		if (flush_rq)
2246 			temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
2247 					   info->rq_minor_code) |
2248 				FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
2249 					   info->rq_major_code);
2250 		if (flush_sq)
2251 			temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
2252 					   info->sq_minor_code) |
2253 				FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
2254 					   info->sq_major_code);
2255 	}
2256 	set_64bit_val(wqe, 16, temp);
2257 
2258 	temp = (info->generate_ae) ?
2259 		info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
2260 					   info->ae_src) : 0;
2261 	set_64bit_val(wqe, 8, temp);
2262 
2263 	hdr = qp->qp_uk.qp_id |
2264 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
2265 	      FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
2266 	      FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
2267 	      FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
2268 	      FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
2269 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2270 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2271 
2272 	set_64bit_val(wqe, 24, hdr);
2273 
2274 	print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8,
2275 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2276 	if (post_sq)
2277 		irdma_sc_cqp_post_sq(cqp);
2278 
2279 	return 0;
2280 }
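
/*
 * Note on the -EALREADY path above: flush_sq/flush_rq latch in the qp, so a
 * repeated flush request for already-flushed queues is ignored rather than
 * posting duplicate CQP work.
 */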
2281 
2282 /**
2283  * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP
2284  * @qp: sc qp
2285  * @info: gen ae information
2286  * @scratch: u64 saved to be used during cqp completion
2287  * @post_sq: flag for cqp db to ring
2288  */
2289 static int irdma_sc_gen_ae(struct irdma_sc_qp *qp,
2290 			   struct irdma_gen_ae_info *info, u64 scratch,
2291 			   bool post_sq)
2292 {
2293 	u64 temp;
2294 	__le64 *wqe;
2295 	struct irdma_sc_cqp *cqp;
2296 	u64 hdr;
2297 
2298 	cqp = qp->pd->dev->cqp;
2299 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2300 	if (!wqe)
2301 		return -ENOMEM;
2302 
2303 	temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
2304 					  info->ae_src);
2305 	set_64bit_val(wqe, 8, temp);
2306 
2307 	hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
2308 					   IRDMA_CQP_OP_GEN_AE) |
2309 	      FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
2310 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2311 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2312 
2313 	set_64bit_val(wqe, 24, hdr);
2314 
2315 	print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8,
2316 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2317 	if (post_sq)
2318 		irdma_sc_cqp_post_sq(cqp);
2319 
2320 	return 0;
2321 }
2322 
2323 /**
 * irdma_sc_qp_upload_context - upload qp's context
2324  * @dev: sc device struct
2325  * @info: upload context info ptr for return
2326  * @scratch: u64 saved to be used during cqp completion
2327  * @post_sq: flag for cqp db to ring
2328  */
2329 static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
2330 				      struct irdma_upload_context_info *info,
2331 				      u64 scratch, bool post_sq)
2332 {
2333 	__le64 *wqe;
2334 	struct irdma_sc_cqp *cqp;
2335 	u64 hdr;
2336 
2337 	cqp = dev->cqp;
2338 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2339 	if (!wqe)
2340 		return -ENOMEM;
2341 
2342 	set_64bit_val(wqe, 16, info->buf_pa);
2343 
2344 	hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
2345 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
2346 	      FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
2347 	      FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
2348 	      FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
2349 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2350 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2351 
2352 	set_64bit_val(wqe, 24, hdr);
2353 
2354 	print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16,
2355 			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2356 	if (post_sq)
2357 		irdma_sc_cqp_post_sq(cqp);
2358 
2359 	return 0;
2360 }
2361 
2362 /**
2363  * irdma_sc_manage_push_page - Handle push page
2364  * @cqp: struct for cqp hw
2365  * @info: push page info
2366  * @scratch: u64 saved to be used during cqp completion
2367  * @post_sq: flag for cqp db to ring
2368  */
2369 static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
2370 				     struct irdma_cqp_manage_push_page_info *info,
2371 				     u64 scratch, bool post_sq)
2372 {
2373 	__le64 *wqe;
2374 	u64 hdr;
2375 
2376 	if (info->free_page &&
2377 	    info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
2378 		return -EINVAL;
2379 
2380 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2381 	if (!wqe)
2382 		return -ENOMEM;
2383 
2384 	set_64bit_val(wqe, 16, info->qs_handle);
2385 	hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
2386 	      FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
2387 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
2388 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
2389 	      FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
2390 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2391 
2392 	set_64bit_val(wqe, 24, hdr);
2393 
2394 	print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET,
2395 			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2396 	if (post_sq)
2397 		irdma_sc_cqp_post_sq(cqp);
2398 
2399 	return 0;
2400 }
2401 
2402 /**
2403  * irdma_sc_suspend_qp - suspend qp for param change
2404  * @cqp: struct for cqp hw
2405  * @qp: sc qp struct
2406  * @scratch: u64 saved to be used during cqp completion
2407  */
2408 static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
2409 			       u64 scratch)
2410 {
2411 	u64 hdr;
2412 	__le64 *wqe;
2413 
2414 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2415 	if (!wqe)
2416 		return -ENOMEM;
2417 
2418 	hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
2419 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
2420 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2421 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2422 
2423 	set_64bit_val(wqe, 24, hdr);
2424 
2425 	print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
2426 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2427 	irdma_sc_cqp_post_sq(cqp);
2428 
2429 	return 0;
2430 }
2431 
2432 /**
2433  * irdma_sc_resume_qp - resume qp after suspend
2434  * @cqp: struct for cqp hw
2435  * @qp: sc qp struct
2436  * @scratch: u64 saved to be used during cqp completion
2437  */
2438 static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
2439 			      u64 scratch)
2440 {
2441 	u64 hdr;
2442 	__le64 *wqe;
2443 
2444 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2445 	if (!wqe)
2446 		return -ENOMEM;
2447 
2448 	set_64bit_val(wqe, 16,
2449 		      FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
2450 
2451 	hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
2452 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
2453 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2454 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2455 
2456 	set_64bit_val(wqe, 24, hdr);
2457 
2458 	print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
2459 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2460 	irdma_sc_cqp_post_sq(cqp);
2461 
2462 	return 0;
2463 }
2464 
2465 /**
2466  * irdma_sc_cq_ack - acknowledge completion q
2467  * @cq: cq struct
2468  */
2469 static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
2470 {
2471 	writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
2472 }
2473 
2474 /**
2475  * irdma_sc_cq_init - initialize completion q
2476  * @cq: cq struct
2477  * @info: cq initialization info
2478  */
2479 int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
2480 {
2481 	u32 pble_obj_cnt;
2482 
2483 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
2484 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
2485 		return -EINVAL;
2486 
2487 	cq->cq_pa = info->cq_base_pa;
2488 	cq->dev = info->dev;
2489 	cq->ceq_id = info->ceq_id;
2490 	info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
2491 	info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
2492 	irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
2493 
2494 	cq->virtual_map = info->virtual_map;
2495 	cq->pbl_chunk_size = info->pbl_chunk_size;
2496 	cq->ceqe_mask = info->ceqe_mask;
2497 	cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
2498 	cq->shadow_area_pa = info->shadow_area_pa;
2499 	cq->shadow_read_threshold = info->shadow_read_threshold;
2500 	cq->ceq_id_valid = info->ceq_id_valid;
2501 	cq->tph_en = info->tph_en;
2502 	cq->tph_val = info->tph_val;
2503 	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2504 	cq->vsi = info->vsi;
2505 
2506 	return 0;
2507 }
2508 
2509 /**
2510  * irdma_sc_cq_create - create completion q
2511  * @cq: cq struct
2512  * @scratch: u64 saved to be used during cqp completion
2513  * @check_overflow: flag for overflow check
2514  * @post_sq: flag for cqp db to ring
2515  */
2516 static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
2517 			      bool check_overflow, bool post_sq)
2518 {
2519 	__le64 *wqe;
2520 	struct irdma_sc_cqp *cqp;
2521 	u64 hdr;
2522 	struct irdma_sc_ceq *ceq;
2523 	int ret_code = 0;
2524 
2525 	cqp = cq->dev->cqp;
2526 	if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
2527 		return -EINVAL;
2528 
2529 	if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
2530 		return -EINVAL;
2531 
2532 	ceq = cq->dev->ceq[cq->ceq_id];
2533 	if (ceq && ceq->reg_cq)
2534 		ret_code = irdma_sc_add_cq_ctx(ceq, cq);
2535 
2536 	if (ret_code)
2537 		return ret_code;
2538 
2539 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2540 	if (!wqe) {
2541 		if (ceq && ceq->reg_cq)
2542 			irdma_sc_remove_cq_ctx(ceq, cq);
2543 		return -ENOMEM;
2544 	}
2545 
2546 	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2547 	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
2548 	set_64bit_val(wqe, 16,
2549 		      FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
2550 	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
2551 	set_64bit_val(wqe, 40, cq->shadow_area_pa);
2552 	set_64bit_val(wqe, 48,
2553 		      FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
2554 	set_64bit_val(wqe, 56,
2555 		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
2556 		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
2557 
2558 	hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
2559 	      FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
2560 			IRDMA_CQPSQ_CQ_CEQID) |
2561 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
2562 	      FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
2563 	      FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
2564 	      FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
2565 	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2566 	      FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
2567 	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2568 	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
2569 			 cq->cq_uk.avoid_mem_cflct) |
2570 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2571 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2572 
2573 	set_64bit_val(wqe, 24, hdr);
2574 
2575 	print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
2576 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2577 	if (post_sq)
2578 		irdma_sc_cqp_post_sq(cqp);
2579 
2580 	return 0;
2581 }
2582 
2583 /**
2584  * irdma_sc_cq_destroy - destroy completion q
2585  * @cq: cq struct
2586  * @scratch: u64 saved to be used during cqp completion
2587  * @post_sq: flag for cqp db to ring
2588  */
2589 int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
2590 {
2591 	struct irdma_sc_cqp *cqp;
2592 	__le64 *wqe;
2593 	u64 hdr;
2594 	struct irdma_sc_ceq *ceq;
2595 
2596 	cqp = cq->dev->cqp;
2597 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2598 	if (!wqe)
2599 		return -ENOMEM;
2600 
2601 	ceq = cq->dev->ceq[cq->ceq_id];
2602 	if (ceq && ceq->reg_cq)
2603 		irdma_sc_remove_cq_ctx(ceq, cq);
2604 
2605 	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2606 	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
2607 	set_64bit_val(wqe, 40, cq->shadow_area_pa);
2608 	set_64bit_val(wqe, 48,
2609 		      (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2610 
2611 	hdr = cq->cq_uk.cq_id |
2612 	      FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
2613 			IRDMA_CQPSQ_CQ_CEQID) |
2614 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
2615 	      FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
2616 	      FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
2617 	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2618 	      FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
2619 	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2620 	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
2621 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2622 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2623 
2624 	set_64bit_val(wqe, 24, hdr);
2625 
2626 	print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
2627 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2628 	if (post_sq)
2629 		irdma_sc_cqp_post_sq(cqp);
2630 
2631 	return 0;
2632 }
2633 
2634 /**
2635  * irdma_sc_cq_resize - set resized cq buffer info
2636  * @cq: resized cq
2637  * @info: resized cq buffer info
2638  */
2639 void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
2640 {
2641 	cq->virtual_map = info->virtual_map;
2642 	cq->cq_pa = info->cq_pa;
2643 	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2644 	cq->pbl_chunk_size = info->pbl_chunk_size;
2645 	irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
2646 }
2647 
2648 /**
2649  * irdma_sc_cq_modify - modify a Completion Queue
2650  * @cq: cq struct
2651  * @info: modification info struct
2652  * @scratch: u64 saved to be used during cqp completion
2653  * @post_sq: flag to post to sq
2654  */
2655 static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
2656 			      struct irdma_modify_cq_info *info, u64 scratch,
2657 			      bool post_sq)
2658 {
2659 	struct irdma_sc_cqp *cqp;
2660 	__le64 *wqe;
2661 	u64 hdr;
2662 	u32 pble_obj_cnt;
2663 
2664 	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
2665 	if (info->cq_resize && info->virtual_map &&
2666 	    info->first_pm_pbl_idx >= pble_obj_cnt)
2667 		return -EINVAL;
2668 
2669 	cqp = cq->dev->cqp;
2670 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2671 	if (!wqe)
2672 		return -ENOMEM;
2673 
2674 	set_64bit_val(wqe, 0, info->cq_size);
2675 	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
2676 	set_64bit_val(wqe, 16,
2677 		      FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
2678 	set_64bit_val(wqe, 32, info->cq_pa);
2679 	set_64bit_val(wqe, 40, cq->shadow_area_pa);
2680 	set_64bit_val(wqe, 48, info->first_pm_pbl_idx);
2681 	set_64bit_val(wqe, 56,
2682 		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
2683 		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
2684 
2685 	hdr = cq->cq_uk.cq_id |
2686 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
2687 	      FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
2688 	      FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
2689 	      FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
2690 	      FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
2691 	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2692 	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2693 	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
2694 			 cq->cq_uk.avoid_mem_cflct) |
2695 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2696 	dma_wmb(); /* make sure WQE is written before valid bit is set */
2697 
2698 	set_64bit_val(wqe, 24, hdr);
2699 
2700 	print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
2701 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
2702 	if (post_sq)
2703 		irdma_sc_cqp_post_sq(cqp);
2704 
2705 	return 0;
2706 }
2707 
2708 /**
2709  * irdma_check_cqp_progress - check cqp processing progress
2710  * @timeout: timeout info struct
2711  * @dev: sc device struct
2712  */
2713 void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
2714 {
2715 	if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
2716 		timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
2717 		timeout->count = 0;
2718 	} else {
2719 		if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] !=
2720 		    timeout->compl_cqp_cmds)
2721 			timeout->count++;
2722 	}
2723 }
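
/*
 * A caller would typically invoke this periodically and treat a growing
 * timeout->count (requested commands outstanding with no new completions)
 * as a sign that the CQP has stalled; the exact threshold is caller policy.
 */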
2724 
2725 /**
2726  * irdma_get_cqp_reg_info - get tail and error status for cqp using registers
2727  * @cqp: struct for cqp hw
2728  * @val: cqp tail register value
2729  * @tail: wqtail register value
2730  * @error: cqp processing err
2731  */
2732 static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
2733 					  u32 *tail, u32 *error)
2734 {
2735 	*val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
2736 	*tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
2737 	*error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
2738 }
2739 
2740 /**
2741  * irdma_cqp_poll_registers - poll cqp registers
2742  * @cqp: struct for cqp hw
2743  * @tail: wqtail register value
2744  * @count: how many times to try for completion
2745  */
2746 static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
2747 				    u32 count)
2748 {
2749 	u32 i = 0;
2750 	u32 newtail, error, val;
2751 
2752 	while (i++ < count) {
2753 		irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
2754 		if (error) {
2755 			error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
2756 			ibdev_dbg(to_ibdev(cqp->dev),
2757 				  "CQP: CQPERRCODES error_code[x%08X]\n",
2758 				  error);
2759 			return -EIO;
2760 		}
2761 		if (newtail != tail) {
2762 			/* SUCCESS */
2763 			IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
2764 			cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
2765 			return 0;
2766 		}
2767 		udelay(cqp->dev->hw_attrs.max_sleep_count);
2768 	}
2769 
2770 	return -ETIMEDOUT;
2771 }
2772 
2773 /**
2774  * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
2775  * @dev: sc device struct
2776  * @buf: pointer to commit buffer
2777  * @buf_idx: buffer index
2778  * @obj_info: object info pointer
2779  * @rsrc_idx: indexs of memory resource
2780  */
2781 static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
2782 				      u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
2783 				      u32 rsrc_idx)
2784 {
2785 	u64 temp;
2786 
2787 	get_64bit_val(buf, buf_idx, &temp);
2788 
2789 	switch (rsrc_idx) {
2790 	case IRDMA_HMC_IW_QP:
2791 		obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
2792 		break;
2793 	case IRDMA_HMC_IW_CQ:
2794 		obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
2795 		break;
2796 	case IRDMA_HMC_IW_APBVT_ENTRY:
2797 		obj_info[rsrc_idx].cnt = 1;
2798 		break;
2799 	default:
2800 		obj_info[rsrc_idx].cnt = (u32)temp;
2801 		break;
2802 	}
2803 
2804 	obj_info[rsrc_idx].base = (temp >> IRDMA_COMMIT_FPM_BASE_S) * 512;
2805 
2806 	return temp;
2807 }
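
/*
 * Illustrative decode: the base field is kept in 512-byte units, so a raw
 * field value of 8 places the object at byte offset 4096 within the FPM;
 * the count field interpretation varies per resource, as switched on
 * rsrc_idx above.
 */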
2808 
2809 /**
2810  * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
2811  * @dev: pointer to dev struct
2812  * @buf: ptr to fpm commit buffer
2813  * @info: ptr to irdma_hmc_obj_info struct
2814  * @sd: number of SDs for HMC objects
2815  *
2816  * parses fpm commit info and copies the base values
2817  * of hmc objects into hmc_info
2818  */
2819 static void
2820 irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
2821 			      struct irdma_hmc_obj_info *info, u32 *sd)
2822 {
2823 	u64 size;
2824 	u32 i;
2825 	u64 max_base = 0;
2826 	u32 last_hmc_obj = 0;
2827 
2828 	irdma_sc_decode_fpm_commit(dev, buf, 0, info,
2829 				   IRDMA_HMC_IW_QP);
2830 	irdma_sc_decode_fpm_commit(dev, buf, 8, info,
2831 				   IRDMA_HMC_IW_CQ);
2832 	/* skipping RSRVD */
2833 	irdma_sc_decode_fpm_commit(dev, buf, 24, info,
2834 				   IRDMA_HMC_IW_HTE);
2835 	irdma_sc_decode_fpm_commit(dev, buf, 32, info,
2836 				   IRDMA_HMC_IW_ARP);
2837 	irdma_sc_decode_fpm_commit(dev, buf, 40, info,
2838 				   IRDMA_HMC_IW_APBVT_ENTRY);
2839 	irdma_sc_decode_fpm_commit(dev, buf, 48, info,
2840 				   IRDMA_HMC_IW_MR);
2841 	irdma_sc_decode_fpm_commit(dev, buf, 56, info,
2842 				   IRDMA_HMC_IW_XF);
2843 	irdma_sc_decode_fpm_commit(dev, buf, 64, info,
2844 				   IRDMA_HMC_IW_XFFL);
2845 	irdma_sc_decode_fpm_commit(dev, buf, 72, info,
2846 				   IRDMA_HMC_IW_Q1);
2847 	irdma_sc_decode_fpm_commit(dev, buf, 80, info,
2848 				   IRDMA_HMC_IW_Q1FL);
2849 	irdma_sc_decode_fpm_commit(dev, buf, 88, info,
2850 				   IRDMA_HMC_IW_TIMER);
2851 	irdma_sc_decode_fpm_commit(dev, buf, 112, info,
2852 				   IRDMA_HMC_IW_PBLE);
2853 	/* skipping RSVD. */
2854 	if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
2855 		irdma_sc_decode_fpm_commit(dev, buf, 96, info,
2856 					   IRDMA_HMC_IW_FSIMC);
2857 		irdma_sc_decode_fpm_commit(dev, buf, 104, info,
2858 					   IRDMA_HMC_IW_FSIAV);
2859 		irdma_sc_decode_fpm_commit(dev, buf, 128, info,
2860 					   IRDMA_HMC_IW_RRF);
2861 		irdma_sc_decode_fpm_commit(dev, buf, 136, info,
2862 					   IRDMA_HMC_IW_RRFFL);
2863 		irdma_sc_decode_fpm_commit(dev, buf, 144, info,
2864 					   IRDMA_HMC_IW_HDR);
2865 		irdma_sc_decode_fpm_commit(dev, buf, 152, info,
2866 					   IRDMA_HMC_IW_MD);
2867 		irdma_sc_decode_fpm_commit(dev, buf, 160, info,
2868 					   IRDMA_HMC_IW_OOISC);
2869 		irdma_sc_decode_fpm_commit(dev, buf, 168, info,
2870 					   IRDMA_HMC_IW_OOISCFFL);
2871 	}
2872 
2873 	/* search for the last object in HMC to find the size of the HMC area. */
2874 	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
2875 		if (info[i].base > max_base) {
2876 			max_base = info[i].base;
2877 			last_hmc_obj = i;
2878 		}
2879 	}
2880 
2881 	size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
2882 	       info[last_hmc_obj].base;
2883 
2884 	if (size & 0x1FFFFF)
2885 		*sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
2886 	else
2887 		*sd = (u32)(size >> 21);
2888 
2889 }
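
/*
 * Worked example (illustrative): SDs cover 2MB (1 << 21) each, so an HMC
 * area whose last object ends at the 5MB mark needs 5MB >> 21 = 2 full SDs
 * plus one more for the 1MB remainder, i.e. *sd = 3.
 */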
2890 
2891 /**
2892  * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
2893  * @buf: ptr to fpm query buffer
2894  * @buf_idx: index into buf
2895  * @obj_info: ptr to irdma_hmc_obj_info struct
2896  * @rsrc_idx: resource index into info
2897  *
2898  * Decode a 64 bit value from fpm query buffer into max count and size
2899  */
2900 static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
2901 				     struct irdma_hmc_obj_info *obj_info,
2902 				     u32 rsrc_idx)
2903 {
2904 	u64 temp;
2905 	u32 size;
2906 
2907 	get_64bit_val(buf, buf_idx, &temp);
2908 	obj_info[rsrc_idx].max_cnt = (u32)temp;
2909 	size = (u32)(temp >> 32);
2910 	obj_info[rsrc_idx].size = BIT_ULL(size);
2911 
2912 	return temp;
2913 }
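
/*
 * Illustrative decode: the low 32 bits hold max_cnt directly, while the
 * high 32 bits hold log2 of the per-object size, so a high word of 12 means
 * each object is BIT_ULL(12) = 4096 bytes.
 */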
2914 
2915 /**
2916  * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
2917  * @dev: ptr to shared code device
2918  * @buf: ptr to fpm query buffer
2919  * @hmc_info: ptr to irdma_hmc_obj_info struct
2920  * @hmc_fpm_misc: ptr to fpm data
2921  *
2922  * parses fpm query buffer and copies the max_cnt and
2923  * size values of hmc objects into hmc_info
2924  */
2925 static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
2926 					struct irdma_hmc_info *hmc_info,
2927 					struct irdma_hmc_fpm_misc *hmc_fpm_misc)
2928 {
2929 	struct irdma_hmc_obj_info *obj_info;
2930 	u64 temp;
2931 	u32 size;
2932 	u16 max_pe_sds;
2933 
2934 	obj_info = hmc_info->hmc_obj;
2935 
2936 	get_64bit_val(buf, 0, &temp);
2937 	hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
2938 	max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
2939 
2940 	hmc_fpm_misc->max_sds = max_pe_sds;
2941 	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
2942 	get_64bit_val(buf, 8, &temp);
2943 	obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
2944 	size = (u32)(temp >> 32);
2945 	obj_info[IRDMA_HMC_IW_QP].size = BIT_ULL(size);
2946 
2947 	get_64bit_val(buf, 16, &temp);
2948 	obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
2949 	size = (u32)(temp >> 32);
2950 	obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
2951 
2952 	irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
2953 	irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
2954 
2955 	obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
2956 	obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
2957 
2958 	irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
2959 	irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
2960 
2961 	get_64bit_val(buf, 64, &temp);
2962 	obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
2963 	obj_info[IRDMA_HMC_IW_XFFL].size = 4;
2964 	hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
2965 	if (!hmc_fpm_misc->xf_block_size)
2966 		return -EINVAL;
2967 
2968 	irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
2969 	get_64bit_val(buf, 80, &temp);
2970 	obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
2971 	obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
2972 
2973 	hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
2974 	if (!hmc_fpm_misc->q1_block_size)
2975 		return -EINVAL;
2976 
2977 	irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
2978 
2979 	get_64bit_val(buf, 112, &temp);
2980 	obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
2981 	obj_info[IRDMA_HMC_IW_PBLE].size = 8;
2982 
2983 	get_64bit_val(buf, 120, &temp);
2984 	hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
2985 	hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
2986 	hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
2987 	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
2988 		return 0;
2989 	irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
2990 	irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV);
2991 	irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF);
2992 
2993 	get_64bit_val(buf, 136, &temp);
2994 	obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
2995 	obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
2996 	hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
2997 	if (!hmc_fpm_misc->rrf_block_size &&
2998 	    obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
2999 		return -EINVAL;
3000 
3001 	irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
3002 	irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
3003 	irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
3004 
3005 	get_64bit_val(buf, 168, &temp);
3006 	obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
3007 	obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
3008 	hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
3009 	if (!hmc_fpm_misc->ooiscf_block_size &&
3010 	    obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
3011 		return -EINVAL;
3012 
3013 	return 0;
3014 }
3015 
3016 /**
3017  * irdma_sc_find_reg_cq - find cq ctx index
3018  * @ceq: ceq sc structure
3019  * @cq: cq sc structure
3020  */
3021 static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
3022 				struct irdma_sc_cq *cq)
3023 {
3024 	u32 i;
3025 
3026 	for (i = 0; i < ceq->reg_cq_size; i++) {
3027 		if (cq == ceq->reg_cq[i])
3028 			return i;
3029 	}
3030 
3031 	return IRDMA_INVALID_CQ_IDX;
3032 }
3033 
3034 /**
3035  * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
3036  * @ceq: ceq sc structure
3037  * @cq: cq sc structure
3038  */
3039 int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
3040 {
3041 	unsigned long flags;
3042 
3043 	spin_lock_irqsave(&ceq->req_cq_lock, flags);
3044 
3045 	if (ceq->reg_cq_size == ceq->elem_cnt) {
3046 		spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3047 		return -ENOMEM;
3048 	}
3049 
3050 	ceq->reg_cq[ceq->reg_cq_size++] = cq;
3051 
3052 	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3053 
3054 	return 0;
3055 }
3056 
3057 /**
3058  * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
3059  * @ceq: ceq sc structure
3060  * @cq: cq sc structure
3061  */
3062 void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
3063 {
3064 	unsigned long flags;
3065 	u32 cq_ctx_idx;
3066 
3067 	spin_lock_irqsave(&ceq->req_cq_lock, flags);
3068 	cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
3069 	if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
3070 		goto exit;
3071 
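	/* keep reg_cq dense: the last entry fills the vacated slot (O(1) remove) */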
3072 	ceq->reg_cq_size--;
3073 	if (cq_ctx_idx != ceq->reg_cq_size)
3074 		ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
3075 	ceq->reg_cq[ceq->reg_cq_size] = NULL;
3076 
3077 exit:
3078 	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3079 }
3080 
3081 /**
3082  * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
3083  * @cqp: IWARP control queue pair pointer
3084  * @info: IWARP control queue pair init info pointer
3085  *
3086  * Initializes the object and context buffers for a control Queue Pair.
3087  */
3088 int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
3089 		      struct irdma_cqp_init_info *info)
3090 {
3091 	u8 hw_sq_size;
3092 
3093 	if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
3094 	    info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
3095 	    ((info->sq_size & (info->sq_size - 1))))
3096 		return -EINVAL;
3097 
3098 	hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
3099 						IRDMA_QUEUE_TYPE_CQP);
3100 	cqp->size = sizeof(*cqp);
3101 	cqp->sq_size = info->sq_size;
3102 	cqp->hw_sq_size = hw_sq_size;
3103 	cqp->sq_base = info->sq;
3104 	cqp->host_ctx = info->host_ctx;
3105 	cqp->sq_pa = info->sq_pa;
3106 	cqp->host_ctx_pa = info->host_ctx_pa;
3107 	cqp->dev = info->dev;
3108 	cqp->struct_ver = info->struct_ver;
3109 	cqp->hw_maj_ver = info->hw_maj_ver;
3110 	cqp->hw_min_ver = info->hw_min_ver;
3111 	cqp->scratch_array = info->scratch_array;
3112 	cqp->polarity = 0;
3113 	cqp->en_datacenter_tcp = info->en_datacenter_tcp;
3114 	cqp->ena_vf_count = info->ena_vf_count;
3115 	cqp->hmc_profile = info->hmc_profile;
3116 	cqp->ceqs_per_vf = info->ceqs_per_vf;
3117 	cqp->disable_packed = info->disable_packed;
3118 	cqp->rocev2_rto_policy = info->rocev2_rto_policy;
3119 	cqp->protocol_used = info->protocol_used;
3120 	memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
3121 	info->dev->cqp = cqp;
3122 
3123 	IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
3124 	cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
3125 	cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
3126 	/* for the cqp commands backlog. */
3127 	INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
3128 
3129 	writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
3130 	writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
3131 	writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3132 
3133 	ibdev_dbg(to_ibdev(cqp->dev),
3134 		  "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
3135 		  cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
3136 		  (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
3137 	return 0;
3138 }
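
/*
 * Illustrative validity check: sq_size must be a power of two between
 * IRDMA_CQP_SW_SQSIZE_4 and IRDMA_CQP_SW_SQSIZE_2048; the
 * (sq_size & (sq_size - 1)) test above rejects non-powers-of-two such as
 * 1000, while e.g. 1024 passes and is then encoded by
 * irdma_get_encoded_wqe_size().
 */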
3139 
3140 /**
3141  * irdma_sc_cqp_create - create cqp during bringup
3142  * @cqp: struct for cqp hw
3143  * @maj_err: If error, major err number
3144  * @min_err: If error, minor err number
3145  */
3146 int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
3147 {
3148 	u64 temp;
3149 	u8 hw_rev;
3150 	u32 cnt = 0, p1, p2, val = 0, err_code;
3151 	int ret_code;
3152 
3153 	hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
3154 	cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
3155 				IRDMA_SD_BUF_ALIGNMENT);
3156 	cqp->sdbuf.va = dma_alloc_coherent(cqp->dev->hw->device,
3157 					   cqp->sdbuf.size, &cqp->sdbuf.pa,
3158 					   GFP_KERNEL);
3159 	if (!cqp->sdbuf.va)
3160 		return -ENOMEM;
3161 
3162 	spin_lock_init(&cqp->dev->cqp_lock);
3163 
3164 	temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
3165 	       FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
3166 	       FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
3167 	       FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
3168 	if (hw_rev >= IRDMA_GEN_2) {
3169 		temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
3170 				   cqp->rocev2_rto_policy) |
3171 			FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
3172 				   cqp->protocol_used);
3173 	}
3174 
3175 	set_64bit_val(cqp->host_ctx, 0, temp);
3176 	set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
3177 
3178 	temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
3179 	       FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
3180 	set_64bit_val(cqp->host_ctx, 16, temp);
3181 	set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
3182 	temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
3183 	       FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
3184 	if (hw_rev >= IRDMA_GEN_2) {
3185 		temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
3186 			FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
3187 	}
3188 	set_64bit_val(cqp->host_ctx, 32, temp);
3189 	set_64bit_val(cqp->host_ctx, 40, 0);
3190 	temp = 0;
3191 	if (hw_rev >= IRDMA_GEN_2) {
3192 		temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
3193 			FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
3194 			FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
3195 	}
3196 	set_64bit_val(cqp->host_ctx, 48, temp);
3197 	temp = 0;
3198 	if (hw_rev >= IRDMA_GEN_2) {
3199 		temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
3200 			FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
3201 			FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
3202 			FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
3203 	}
3204 	set_64bit_val(cqp->host_ctx, 56, temp);
3205 	print_hex_dump_debug("WQE: CQP_HOST_CTX WQE", DUMP_PREFIX_OFFSET, 16,
3206 			     8, cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8, false);
3207 	p1 = cqp->host_ctx_pa >> 32;
3208 	p2 = (u32)cqp->host_ctx_pa;
3209 
3210 	writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
3211 	writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
3212 
3213 	do {
3214 		if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
3215 			ret_code = -ETIMEDOUT;
3216 			goto err;
3217 		}
3218 		udelay(cqp->dev->hw_attrs.max_sleep_count);
3219 		val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3220 	} while (!val);
3221 
3222 	if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
3223 		ret_code = -EOPNOTSUPP;
3224 		goto err;
3225 	}
3226 
3227 	cqp->process_cqp_sds = irdma_update_sds_noccq;
3228 	return 0;
3229 
3230 err:
3231 	dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
3232 			  cqp->sdbuf.va, cqp->sdbuf.pa);
3233 	cqp->sdbuf.va = NULL;
3234 	err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
3235 	*min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
3236 	*maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
3237 	return ret_code;
3238 }
3239 
3240 /**
3241  * irdma_sc_cqp_post_sq - post to cqp's sq
3242  * @cqp: struct for cqp hw
3243  */
3244 void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
3245 {
3246 	writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
3247 
3248 	ibdev_dbg(to_ibdev(cqp->dev),
3249 		  "WQE: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
3250 		  cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
3251 }
3252 
3253 /**
3254  * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq
3255  * and pass back index
3256  * @cqp: CQP HW structure
3257  * @scratch: private data for CQP WQE
3258  * @wqe_idx: WQE index of CQP SQ
3259  */
3260 __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
3261 					   u32 *wqe_idx)
3262 {
3263 	__le64 *wqe = NULL;
3264 	int ret_code;
3265 
3266 	if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
3267 		ibdev_dbg(to_ibdev(cqp->dev),
3268 			  "WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
3269 			  cqp->sq_ring.head, cqp->sq_ring.tail,
3270 			  cqp->sq_ring.size);
3271 		return NULL;
3272 	}
3273 	IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
3274 	if (ret_code)
3275 		return NULL;
3276 
3277 	cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
3278 	if (!*wqe_idx)
3279 		cqp->polarity = !cqp->polarity;
3280 	wqe = cqp->sq_base[*wqe_idx].elem;
3281 	cqp->scratch_array[*wqe_idx] = scratch;
3282 	IRDMA_CQP_INIT_WQE(wqe);
3283 
3284 	return wqe;
3285 }
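
/*
 * Note on the valid-bit scheme used above: the SQ is a ring, and
 * cqp->polarity toggles every time the head wraps back to index 0.
 * Each new WQE is stamped with the current polarity in its WQEVALID
 * field, so entries left over from the previous pass around the ring
 * carry the opposite polarity and are never mistaken for new work.
 */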
3286 
3287 /**
3288  * irdma_sc_cqp_destroy - destroy cqp during close
3289  * @cqp: struct for cqp hw
3290  */
3291 int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
3292 {
3293 	u32 cnt = 0, val;
3294 	int ret_code = 0;
3295 
3296 	writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
3297 	writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
3298 	do {
3299 		if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
3300 			ret_code = -ETIMEDOUT;
3301 			break;
3302 		}
3303 		udelay(cqp->dev->hw_attrs.max_sleep_count);
3304 		val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3305 	} while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
3306 
3307 	dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
3308 			  cqp->sdbuf.va, cqp->sdbuf.pa);
3309 	cqp->sdbuf.va = NULL;
3310 	return ret_code;
3311 }
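
/*
 * Unlike the create path, which polls CCQPSTATUS until it reads back
 * non-zero, the destroy loop above waits for the CCQP_DONE bit to
 * deassert after the CCQPHIGH/CCQPLOW registers are cleared, and then
 * frees the SD buffer that irdma_sc_cqp_create() allocated.
 */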
3312 
3313 /**
3314  * irdma_sc_ccq_arm - enable intr for control cq
3315  * @ccq: ccq sc struct
3316  */
3317 void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
3318 {
3319 	u64 temp_val;
3320 	u16 sw_cq_sel;
3321 	u8 arm_next_se;
3322 	u8 arm_seq_num;
3323 
3324 	get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
3325 	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
3326 	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
3327 	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
3328 	arm_seq_num++;
3329 	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
3330 		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
3331 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
3332 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
3333 	set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
3334 
3335 	dma_wmb(); /* make sure shadow area is updated before arming */
3336 
3337 	writel(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
3338 }
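
/*
 * Re-arming is done through the shadow area rather than a register:
 * the arm sequence number is incremented on every arm and written back
 * together with the preserved SW_CQ_SELECT/ARM_NEXT_SE fields, and the
 * dma_wmb() guarantees the shadow update is visible before the CQ id
 * doorbell write that prompts the hardware to sample it.
 */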
3339 
3340 /**
3341  * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
3342  * @ccq: ccq sc struct
3343  * @info: completion q entry to return
3344  */
3345 int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
3346 			      struct irdma_ccq_cqe_info *info)
3347 {
3348 	u64 qp_ctx, temp, temp1;
3349 	__le64 *cqe;
3350 	struct irdma_sc_cqp *cqp;
3351 	u32 wqe_idx;
3352 	u32 error;
3353 	u8 polarity;
3354 	int ret_code = 0;
3355 
3356 	if (ccq->cq_uk.avoid_mem_cflct)
3357 		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
3358 	else
3359 		cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
3360 
3361 	get_64bit_val(cqe, 24, &temp);
3362 	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
3363 	if (polarity != ccq->cq_uk.polarity)
3364 		return -ENOENT;
3365 
3366 	get_64bit_val(cqe, 8, &qp_ctx);
3367 	cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
3368 	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
3369 	info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
3370 	info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
3371 	if (info->error) {
3372 		info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
3373 		error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
3374 		ibdev_dbg(to_ibdev(cqp->dev),
3375 			  "CQP: CQPERRCODES error_code[x%08X]\n", error);
3376 	}
3377 
3378 	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
3379 	info->scratch = cqp->scratch_array[wqe_idx];
3380 
3381 	get_64bit_val(cqe, 16, &temp1);
3382 	info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
3383 	get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
3384 	info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
3385 	info->cqp = cqp;
3386 
3387 	/*  move the head for cq */
3388 	IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
3389 	if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
3390 		ccq->cq_uk.polarity ^= 1;
3391 
3392 	/* update cq tail in cq shadow memory also */
3393 	IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
3394 	set_64bit_val(ccq->cq_uk.shadow_area, 0,
3395 		      IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
3396 
3397 	dma_wmb(); /* make sure shadow area is updated before moving tail */
3398 
3399 	IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
3400 	ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
3401 
3402 	return ret_code;
3403 }
3404 
3405 /**
3406  * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
3407  * @cqp: struct for cqp hw
3408  * @op_code: cqp opcode for completion
3409  * @compl_info: completion q entry to return
3410  */
3411 int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
3412 				  struct irdma_ccq_cqe_info *compl_info)
3413 {
3414 	struct irdma_ccq_cqe_info info = {};
3415 	struct irdma_sc_cq *ccq;
3416 	int ret_code = 0;
3417 	u32 cnt = 0;
3418 
3419 	ccq = cqp->dev->ccq;
3420 	while (1) {
3421 		if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
3422 			return -ETIMEDOUT;
3423 
3424 		if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
3425 			udelay(cqp->dev->hw_attrs.max_sleep_count);
3426 			continue;
3427 		}
3428 		if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
3429 			ret_code = -EIO;
3430 			break;
3431 		}
3432 		/* make sure op code matches */
3433 		if (op_code == info.op_code)
3434 			break;
3435 		ibdev_dbg(to_ibdev(cqp->dev),
3436 			  "WQE: opcode mismatch for my op code 0x%x, returned opcode %x\n",
3437 			  op_code, info.op_code);
3438 	}
3439 
3440 	if (compl_info)
3441 		memcpy(compl_info, &info, sizeof(*compl_info));
3442 
3443 	return ret_code;
3444 }
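
/*
 * Typical synchronous command pattern built from the helpers above
 * (hypothetical sketch, error handling trimmed): build a WQE, ring the
 * doorbell, then spin on the CCQ until a completion with the matching
 * opcode arrives.
 *
 *	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 *	... fill WQE fields, write the header with the valid bit last ...
 *	irdma_sc_cqp_post_sq(cqp);
 *	err = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
 *					    &compl_info);
 *
 * Completions for other opcodes seen while polling are consumed and
 * logged, which is why the loop only exits on an opcode match, an
 * error, or the 100 * max_done_count timeout.
 */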
3445 
3446 /**
3447  * irdma_sc_manage_hmc_pm_func_table - manage the hmc pm function table
3448  * @cqp: struct for cqp hw
3449  * @info: info for the manage function table operation
3450  * @scratch: u64 saved to be used during cqp completion
3451  * @post_sq: flag for cqp db to ring
3452  */
3453 static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
3454 					     struct irdma_hmc_fcn_info *info,
3455 					     u64 scratch, bool post_sq)
3456 {
3457 	__le64 *wqe;
3458 	u64 hdr;
3459 
3460 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3461 	if (!wqe)
3462 		return -ENOMEM;
3463 
3464 	set_64bit_val(wqe, 0, 0);
3465 	set_64bit_val(wqe, 8, 0);
3466 	set_64bit_val(wqe, 16, 0);
3467 	set_64bit_val(wqe, 32, 0);
3468 	set_64bit_val(wqe, 40, 0);
3469 	set_64bit_val(wqe, 48, 0);
3470 	set_64bit_val(wqe, 56, 0);
3471 
3472 	hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
3473 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE,
3474 			 IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
3475 	      FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
3476 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3477 	dma_wmb(); /* make sure WQE is written before valid bit is set */
3478 
3479 	set_64bit_val(wqe, 24, hdr);
3480 
3481 	print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE",
3482 			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
3483 			     IRDMA_CQP_WQE_SIZE * 8, false);
3484 	if (post_sq)
3485 		irdma_sc_cqp_post_sq(cqp);
3486 
3487 	return 0;
3488 }
3489 
3490 /**
3491  * irdma_sc_commit_fpm_val_done - wait for cqp cqe completion
3492  * for fpm commit
3493  * @cqp: struct for cqp hw
3494  */
3495 static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
3496 {
3497 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
3498 					     NULL);
3499 }
3500 
3501 /**
3502  * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
3503  * @cqp: struct for cqp hw
3504  * @scratch: u64 saved to be used during cqp completion
3505  * @hmc_fn_id: hmc function id
3506  * @commit_fpm_mem: Memory for fpm values
3507  * @post_sq: flag for cqp db to ring
3508  * @wait_type: poll ccq or cqp registers for cqp completion
3509  */
3510 static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
3511 				   u8 hmc_fn_id,
3512 				   struct irdma_dma_mem *commit_fpm_mem,
3513 				   bool post_sq, u8 wait_type)
3514 {
3515 	__le64 *wqe;
3516 	u64 hdr;
3517 	u32 tail, val, error;
3518 	int ret_code = 0;
3519 
3520 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3521 	if (!wqe)
3522 		return -ENOMEM;
3523 
3524 	set_64bit_val(wqe, 16, hmc_fn_id);
3525 	set_64bit_val(wqe, 32, commit_fpm_mem->pa);
3526 
3527 	hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
3528 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
3529 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3530 
3531 	dma_wmb(); /* make sure WQE is written before valid bit is set */
3532 
3533 	set_64bit_val(wqe, 24, hdr);
3534 
3535 	print_hex_dump_debug("WQE: COMMIT_FPM_VAL WQE", DUMP_PREFIX_OFFSET,
3536 			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3537 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
3538 
3539 	if (post_sq) {
3540 		irdma_sc_cqp_post_sq(cqp);
3541 		if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
3542 			ret_code = irdma_cqp_poll_registers(cqp, tail,
3543 							    cqp->dev->hw_attrs.max_done_count);
3544 		else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
3545 			ret_code = irdma_sc_commit_fpm_val_done(cqp);
3546 	}
3547 
3548 	return ret_code;
3549 }
3550 
3551 /**
3552  * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
3553  * query fpm
3554  * @cqp: struct for cqp hw
3555  */
3556 static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
3557 {
3558 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
3559 					     NULL);
3560 }
3561 
3562 /**
3563  * irdma_sc_query_fpm_val - cqp wqe query fpm values
3564  * @cqp: struct for cqp hw
3565  * @scratch: u64 saved to be used during cqp completion
3566  * @hmc_fn_id: hmc function id
3567  * @query_fpm_mem: memory for return fpm values
3568  * @post_sq: flag for cqp db to ring
3569  * @wait_type: poll ccq or cqp registers for cqp completion
3570  */
3571 static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
3572 				  u8 hmc_fn_id,
3573 				  struct irdma_dma_mem *query_fpm_mem,
3574 				  bool post_sq, u8 wait_type)
3575 {
3576 	__le64 *wqe;
3577 	u64 hdr;
3578 	u32 tail, val, error;
3579 	int ret_code = 0;
3580 
3581 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3582 	if (!wqe)
3583 		return -ENOMEM;
3584 
3585 	set_64bit_val(wqe, 16, hmc_fn_id);
3586 	set_64bit_val(wqe, 32, query_fpm_mem->pa);
3587 
3588 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
3589 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3590 	dma_wmb(); /* make sure WQE is written before valid bit is set */
3591 
3592 	set_64bit_val(wqe, 24, hdr);
3593 
3594 	print_hex_dump_debug("WQE: QUERY_FPM WQE", DUMP_PREFIX_OFFSET, 16, 8,
3595 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3596 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
3597 
3598 	if (post_sq) {
3599 		irdma_sc_cqp_post_sq(cqp);
3600 		if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
3601 			ret_code = irdma_cqp_poll_registers(cqp, tail,
3602 							    cqp->dev->hw_attrs.max_done_count);
3603 		else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
3604 			ret_code = irdma_sc_query_fpm_val_done(cqp);
3605 	}
3606 
3607 	return ret_code;
3608 }
3609 
3610 /**
3611  * irdma_sc_ceq_init - initialize ceq
3612  * @ceq: ceq sc structure
3613  * @info: ceq initialization info
3614  */
3615 int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
3616 		      struct irdma_ceq_init_info *info)
3617 {
3618 	u32 pble_obj_cnt;
3619 
3620 	if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
3621 	    info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
3622 		return -EINVAL;
3623 
3624 	if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
3625 		return -EINVAL;
3626 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
3627 
3628 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
3629 		return -EINVAL;
3630 
3631 	ceq->size = sizeof(*ceq);
3632 	ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
3633 	ceq->ceq_id = info->ceq_id;
3634 	ceq->dev = info->dev;
3635 	ceq->elem_cnt = info->elem_cnt;
3636 	ceq->ceq_elem_pa = info->ceqe_pa;
3637 	ceq->virtual_map = info->virtual_map;
3638 	ceq->itr_no_expire = info->itr_no_expire;
3639 	ceq->reg_cq = info->reg_cq;
3640 	ceq->reg_cq_size = 0;
3641 	spin_lock_init(&ceq->req_cq_lock);
3642 	ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
3643 	ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
3644 	ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
3645 	ceq->tph_en = info->tph_en;
3646 	ceq->tph_val = info->tph_val;
3647 	ceq->vsi = info->vsi;
3648 	ceq->polarity = 1;
3649 	IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
3650 	ceq->dev->ceq[info->ceq_id] = ceq;
3651 
3652 	return 0;
3653 }
3654 
3655 /**
3656  * irdma_sc_ceq_create - create ceq wqe
3657  * @ceq: ceq sc structure
3658  * @scratch: u64 saved to be used during cqp completion
3659  * @post_sq: flag for cqp db to ring
3660  */
3662 static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
3663 			       bool post_sq)
3664 {
3665 	struct irdma_sc_cqp *cqp;
3666 	__le64 *wqe;
3667 	u64 hdr;
3668 
3669 	cqp = ceq->dev->cqp;
3670 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3671 	if (!wqe)
3672 		return -ENOMEM;
3673 	set_64bit_val(wqe, 16, ceq->elem_cnt);
3674 	set_64bit_val(wqe, 32,
3675 		      (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
3676 	set_64bit_val(wqe, 48,
3677 		      (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
3678 	set_64bit_val(wqe, 56,
3679 		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
3680 		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
3681 	hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
3682 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
3683 	      FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
3684 	      FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
3685 	      FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
3686 	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
3687 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3688 	dma_wmb(); /* make sure WQE is written before valid bit is set */
3689 
3690 	set_64bit_val(wqe, 24, hdr);
3691 
3692 	print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
3693 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3694 	if (post_sq)
3695 		irdma_sc_cqp_post_sq(cqp);
3696 
3697 	return 0;
3698 }
3699 
3700 /**
3701  * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
3702  * @ceq: ceq sc structure
3703  */
3704 static int irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
3705 {
3706 	struct irdma_sc_cqp *cqp;
3707 
3708 	cqp = ceq->dev->cqp;
3709 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
3710 					     NULL);
3711 }
3712 
3713 /**
3714  * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
3715  * @ceq: ceq sc structure
3716  */
3717 int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
3718 {
3719 	struct irdma_sc_cqp *cqp;
3720 
3721 	if (ceq->reg_cq)
3722 		irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
3723 
3724 	cqp = ceq->dev->cqp;
3725 	cqp->process_cqp_sds = irdma_update_sds_noccq;
3726 
3727 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
3728 					     NULL);
3729 }
3730 
3731 /**
3732  * irdma_sc_cceq_create - create cceq
3733  * @ceq: ceq sc structure
3734  * @scratch: u64 saved to be used during cqp completion
3735  */
3736 int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
3737 {
3738 	int ret_code;
3739 	struct irdma_sc_dev *dev = ceq->dev;
3740 
3741 	dev->ccq->vsi = ceq->vsi;
3742 	if (ceq->reg_cq) {
3743 		ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
3744 		if (ret_code)
3745 			return ret_code;
3746 	}
3747 
3748 	ret_code = irdma_sc_ceq_create(ceq, scratch, true);
3749 	if (!ret_code)
3750 		return irdma_sc_cceq_create_done(ceq);
3751 
3752 	return ret_code;
3753 }
3754 
3755 /**
3756  * irdma_sc_ceq_destroy - destroy ceq
3757  * @ceq: ceq sc structure
3758  * @scratch: u64 saved to be used during cqp completion
3759  * @post_sq: flag for cqp db to ring
3760  */
3761 int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
3762 {
3763 	struct irdma_sc_cqp *cqp;
3764 	__le64 *wqe;
3765 	u64 hdr;
3766 
3767 	cqp = ceq->dev->cqp;
3768 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3769 	if (!wqe)
3770 		return -ENOMEM;
3771 
3772 	set_64bit_val(wqe, 16, ceq->elem_cnt);
3773 	set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
3774 	hdr = ceq->ceq_id |
3775 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
3776 	      FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
3777 	      FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
3778 	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
3779 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3780 	dma_wmb(); /* make sure WQE is written before valid bit is set */
3781 
3782 	set_64bit_val(wqe, 24, hdr);
3783 
3784 	print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
3785 			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3786 	if (post_sq)
3787 		irdma_sc_cqp_post_sq(cqp);
3788 
3789 	return 0;
3790 }
3791 
3792 /**
3793  * irdma_sc_process_ceq - process ceq
3794  * @dev: sc device struct
3795  * @ceq: ceq sc structure
3796  *
3797  * It is expected that the caller serializes this function with
3798  * cleanup_ceqes() because both functions manipulate the same ceq
3799  */
3800 void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
3801 {
3802 	u64 temp;
3803 	__le64 *ceqe;
3804 	struct irdma_sc_cq *cq = NULL;
3805 	struct irdma_sc_cq *temp_cq;
3806 	u8 polarity;
3807 	u32 cq_idx;
3808 	unsigned long flags;
3809 
3810 	do {
3811 		cq_idx = 0;
3812 		ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
3813 		get_64bit_val(ceqe, 0, &temp);
3814 		polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
3815 		if (polarity != ceq->polarity)
3816 			return NULL;
3817 
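		/*
		 * CQ contexts are stored in the CEQE shifted right by one
		 * so the top bit can carry the valid flag (see the matching
		 * ">> 1" where contexts are written, e.g. in
		 * irdma_sc_ccq_destroy()); shifting left restores the
		 * pointer, whose low bit is zero by alignment.
		 */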
3818 		temp_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
3819 		if (!temp_cq) {
3820 			cq_idx = IRDMA_INVALID_CQ_IDX;
3821 			IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
3822 
3823 			if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
3824 				ceq->polarity ^= 1;
3825 			continue;
3826 		}
3827 
3828 		cq = temp_cq;
3829 		if (ceq->reg_cq) {
3830 			spin_lock_irqsave(&ceq->req_cq_lock, flags);
3831 			cq_idx = irdma_sc_find_reg_cq(ceq, cq);
3832 			spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3833 		}
3834 
3835 		IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
3836 		if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
3837 			ceq->polarity ^= 1;
3838 	} while (cq_idx == IRDMA_INVALID_CQ_IDX);
3839 
3840 	if (cq)
3841 		irdma_sc_cq_ack(cq);
3842 	return cq;
3843 }
3844 
3845 /**
3846  * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
3847  * @cq: cq for which the ceqes need to be cleaned up
3848  * @ceq: ceq ptr
3849  *
3850  * The function is called after the cq is destroyed to cleanup
3851  * its pending ceqe entries. It is expected that the caller serializes
3852  * this function with process_ceq(), which runs in interrupt context.
3853  */
3854 void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
3855 {
3856 	struct irdma_sc_cq *next_cq;
3857 	u8 ceq_polarity = ceq->polarity;
3858 	__le64 *ceqe;
3859 	u8 polarity;
3860 	u64 temp;
3861 	int next;
3862 	u32 i;
3863 
3864 	next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
3865 
3866 	for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
3867 		ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
3868 
3869 		get_64bit_val(ceqe, 0, &temp);
3870 		polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
3871 		if (polarity != ceq_polarity)
3872 			return;
3873 
3874 		next_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
3875 		if (cq == next_cq)
3876 			set_64bit_val(ceqe, 0, temp & IRDMA_CEQE_VALID);
3877 
3878 		next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
3879 		if (!next)
3880 			ceq_polarity ^= 1;
3881 	}
3882 }
3883 
3884 /**
3885  * irdma_sc_aeq_init - initialize aeq
3886  * @aeq: aeq structure ptr
3887  * @info: aeq initialization info
3888  */
3889 int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
3890 		      struct irdma_aeq_init_info *info)
3891 {
3892 	u32 pble_obj_cnt;
3893 
3894 	if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
3895 	    info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
3896 		return -EINVAL;
3897 
3898 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
3899 
3900 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
3901 		return -EINVAL;
3902 
3903 	aeq->size = sizeof(*aeq);
3904 	aeq->polarity = 1;
3905 	aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
3906 	aeq->dev = info->dev;
3907 	aeq->elem_cnt = info->elem_cnt;
3908 	aeq->aeq_elem_pa = info->aeq_elem_pa;
3909 	IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
3910 	aeq->virtual_map = info->virtual_map;
3911 	aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
3912 	aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
3913 	aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
3914 	aeq->msix_idx = info->msix_idx;
3915 	info->dev->aeq = aeq;
3916 
3917 	return 0;
3918 }
3919 
3920 /**
3921  * irdma_sc_aeq_create - create aeq
3922  * @aeq: aeq structure ptr
3923  * @scratch: u64 saved to be used during cqp completion
3924  * @post_sq: flag for cqp db to ring
3925  */
3926 static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
3927 			       bool post_sq)
3928 {
3929 	__le64 *wqe;
3930 	struct irdma_sc_cqp *cqp;
3931 	u64 hdr;
3932 
3933 	cqp = aeq->dev->cqp;
3934 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3935 	if (!wqe)
3936 		return -ENOMEM;
3937 	set_64bit_val(wqe, 16, aeq->elem_cnt);
3938 	set_64bit_val(wqe, 32,
3939 		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
3940 	set_64bit_val(wqe, 48,
3941 		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
3942 
3943 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
3944 	      FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
3945 	      FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
3946 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3947 	dma_wmb(); /* make sure WQE is written before valid bit is set */
3948 
3949 	set_64bit_val(wqe, 24, hdr);
3950 
3951 	print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
3952 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3953 	if (post_sq)
3954 		irdma_sc_cqp_post_sq(cqp);
3955 
3956 	return 0;
3957 }
3958 
3959 /**
3960  * irdma_sc_aeq_destroy - destroy aeq during close
3961  * @aeq: aeq structure ptr
3962  * @scratch: u64 saved to be used during cqp completion
3963  * @post_sq: flag for cqp db to ring
3964  */
3965 static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
3966 				bool post_sq)
3967 {
3968 	__le64 *wqe;
3969 	struct irdma_sc_cqp *cqp;
3970 	struct irdma_sc_dev *dev;
3971 	u64 hdr;
3972 
3973 	dev = aeq->dev;
3974 	writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
3975 
3976 	cqp = dev->cqp;
3977 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3978 	if (!wqe)
3979 		return -ENOMEM;
3980 	set_64bit_val(wqe, 16, aeq->elem_cnt);
3981 	set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
3982 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
3983 	      FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
3984 	      FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
3985 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3986 	dma_wmb(); /* make sure WQE is written before valid bit is set */
3987 
3988 	set_64bit_val(wqe, 24, hdr);
3989 
3990 	print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
3991 			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
3992 	if (post_sq)
3993 		irdma_sc_cqp_post_sq(cqp);
3994 	return 0;
3995 }
3996 
3997 /**
3998  * irdma_sc_get_next_aeqe - get next aeq entry
3999  * @aeq: aeq structure ptr
4000  * @info: aeqe info to be returned
4001  */
4002 int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
4003 			   struct irdma_aeqe_info *info)
4004 {
4005 	u64 temp, compl_ctx;
4006 	__le64 *aeqe;
4007 	u16 wqe_idx;
4008 	u8 ae_src;
4009 	u8 polarity;
4010 
4011 	aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
4012 	get_64bit_val(aeqe, 0, &compl_ctx);
4013 	get_64bit_val(aeqe, 8, &temp);
4014 	polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
4015 
4016 	if (aeq->polarity != polarity)
4017 		return -ENOENT;
4018 
4019 	print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
4020 			     aeqe, 16, false);
4021 
4022 	ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
4023 	wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
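	/*
	 * The QP/CQ id spans two AEQE fields: QPCQID_LOW carries bits 17:0
	 * and QPCQID_HI the bits above, recombined below with the << 18
	 * shift.
	 */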
4024 	info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
4025 			 ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
4026 	info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
4027 	info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
4028 	info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
4029 	info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
4030 	info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
4031 
4032 	info->ae_src = ae_src;
4033 	switch (info->ae_id) {
4034 	case IRDMA_AE_PRIV_OPERATION_DENIED:
4035 	case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
4036 	case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
4037 	case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
4038 	case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
4039 	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
4040 	case IRDMA_AE_UDA_XMIT_BAD_PD:
4041 	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
4042 	case IRDMA_AE_BAD_CLOSE:
4043 	case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
4044 	case IRDMA_AE_STAG_ZERO_INVALID:
4045 	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
4046 	case IRDMA_AE_IB_INVALID_REQUEST:
4047 	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
4048 	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
4049 	case IRDMA_AE_IB_REMOTE_OP_ERROR:
4050 	case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
4051 	case IRDMA_AE_DDP_UBE_INVALID_MO:
4052 	case IRDMA_AE_DDP_UBE_INVALID_QN:
4053 	case IRDMA_AE_DDP_NO_L_BIT:
4054 	case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
4055 	case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
4056 	case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
4057 	case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
4058 	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
4059 	case IRDMA_AE_INVALID_ARP_ENTRY:
4060 	case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
4061 	case IRDMA_AE_STALE_ARP_ENTRY:
4062 	case IRDMA_AE_INVALID_AH_ENTRY:
4063 	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
4064 	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
4065 	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
4066 	case IRDMA_AE_LLP_DOUBT_REACHABILITY:
4067 	case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
4068 	case IRDMA_AE_RESET_SENT:
4069 	case IRDMA_AE_TERMINATE_SENT:
4070 	case IRDMA_AE_RESET_NOT_SENT:
4071 	case IRDMA_AE_LCE_QP_CATASTROPHIC:
4072 	case IRDMA_AE_QP_SUSPEND_COMPLETE:
4073 	case IRDMA_AE_UDA_L4LEN_INVALID:
4074 		info->qp = true;
4075 		info->compl_ctx = compl_ctx;
4076 		break;
4077 	case IRDMA_AE_LCE_CQ_CATASTROPHIC:
4078 		info->cq = true;
4079 		info->compl_ctx = compl_ctx << 1;
4080 		ae_src = IRDMA_AE_SOURCE_RSVD;
4081 		break;
4082 	case IRDMA_AE_ROCE_EMPTY_MCG:
4083 	case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
4084 	case IRDMA_AE_ROCE_BAD_MC_QPID:
4085 	case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
4086 		fallthrough;
4087 	case IRDMA_AE_LLP_CONNECTION_RESET:
4088 	case IRDMA_AE_LLP_SYN_RECEIVED:
4089 	case IRDMA_AE_LLP_FIN_RECEIVED:
4090 	case IRDMA_AE_LLP_CLOSE_COMPLETE:
4091 	case IRDMA_AE_LLP_TERMINATE_RECEIVED:
4092 	case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
4093 		ae_src = IRDMA_AE_SOURCE_RSVD;
4094 		info->qp = true;
4095 		info->compl_ctx = compl_ctx;
4096 		break;
4097 	default:
4098 		break;
4099 	}
4100 
4101 	switch (ae_src) {
4102 	case IRDMA_AE_SOURCE_RQ:
4103 	case IRDMA_AE_SOURCE_RQ_0011:
4104 		info->qp = true;
4105 		info->rq = true;
4106 		info->wqe_idx = wqe_idx;
4107 		info->compl_ctx = compl_ctx;
4108 		break;
4109 	case IRDMA_AE_SOURCE_CQ:
4110 	case IRDMA_AE_SOURCE_CQ_0110:
4111 	case IRDMA_AE_SOURCE_CQ_1010:
4112 	case IRDMA_AE_SOURCE_CQ_1110:
4113 		info->cq = true;
4114 		info->compl_ctx = compl_ctx << 1;
4115 		break;
4116 	case IRDMA_AE_SOURCE_SQ:
4117 	case IRDMA_AE_SOURCE_SQ_0111:
4118 		info->qp = true;
4119 		info->sq = true;
4120 		info->wqe_idx = wqe_idx;
4121 		info->compl_ctx = compl_ctx;
4122 		break;
4123 	case IRDMA_AE_SOURCE_IN_RR_WR:
4124 	case IRDMA_AE_SOURCE_IN_RR_WR_1011:
4125 		info->qp = true;
4126 		info->compl_ctx = compl_ctx;
4127 		info->in_rdrsp_wr = true;
4128 		break;
4129 	case IRDMA_AE_SOURCE_OUT_RR:
4130 	case IRDMA_AE_SOURCE_OUT_RR_1111:
4131 		info->qp = true;
4132 		info->compl_ctx = compl_ctx;
4133 		info->out_rdrsp = true;
4134 		break;
4135 	case IRDMA_AE_SOURCE_RSVD:
4136 	default:
4137 		break;
4138 	}
4139 
4140 	IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
4141 	if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
4142 		aeq->polarity ^= 1;
4143 
4144 	return 0;
4145 }
4146 
4147 /**
4148  * irdma_sc_repost_aeq_entries - repost completed aeq entries
4149  * @dev: sc device struct
4150  * @count: allocate count
4151  */
4152 void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
4153 {
4154 	writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
4155 }
4156 
4157 /**
4158  * irdma_sc_ccq_init - initialize control cq
4159  * @cq: sc's cq struct
4160  * @info: info for control cq initialization
4161  */
4162 int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
4163 {
4164 	u32 pble_obj_cnt;
4165 
4166 	if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
4167 	    info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
4168 		return -EINVAL;
4169 
4170 	if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
4171 		return -EINVAL;
4172 
4173 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
4174 
4175 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
4176 		return -EINVAL;
4177 
4178 	cq->cq_pa = info->cq_pa;
4179 	cq->cq_uk.cq_base = info->cq_base;
4180 	cq->shadow_area_pa = info->shadow_area_pa;
4181 	cq->cq_uk.shadow_area = info->shadow_area;
4182 	cq->shadow_read_threshold = info->shadow_read_threshold;
4183 	cq->dev = info->dev;
4184 	cq->ceq_id = info->ceq_id;
4185 	cq->cq_uk.cq_size = info->num_elem;
4186 	cq->cq_type = IRDMA_CQ_TYPE_CQP;
4187 	cq->ceqe_mask = info->ceqe_mask;
4188 	IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
4189 	cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
4190 	cq->ceq_id_valid = info->ceq_id_valid;
4191 	cq->tph_en = info->tph_en;
4192 	cq->tph_val = info->tph_val;
4193 	cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
4194 	cq->pbl_list = info->pbl_list;
4195 	cq->virtual_map = info->virtual_map;
4196 	cq->pbl_chunk_size = info->pbl_chunk_size;
4197 	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
4198 	cq->cq_uk.polarity = true;
4199 	cq->vsi = info->vsi;
4200 	cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
4201 
4202 	/* Only applicable to CQs other than CCQ so initialize to zero */
4203 	cq->cq_uk.cqe_alloc_db = NULL;
4204 
4205 	info->dev->ccq = cq;
4206 	return 0;
4207 }
4208 
4209 /**
4210  * irdma_sc_ccq_create_done - poll cqp for ccq create
4211  * @ccq: ccq sc struct
4212  */
4213 static inline int irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
4214 {
4215 	struct irdma_sc_cqp *cqp;
4216 
4217 	cqp = ccq->dev->cqp;
4218 
4219 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
4220 }
4221 
4222 /**
4223  * irdma_sc_ccq_create - create control cq
4224  * @ccq: ccq sc struct
4225  * @scratch: u64 saved to be used during cqp completion
4226  * @check_overflow: overflow flag for ccq
4227  * @post_sq: flag for cqp db to ring
4228  */
4229 int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
4230 			bool check_overflow, bool post_sq)
4231 {
4232 	int ret_code;
4233 
4234 	ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
4235 	if (ret_code)
4236 		return ret_code;
4237 
4238 	if (post_sq) {
4239 		ret_code = irdma_sc_ccq_create_done(ccq);
4240 		if (ret_code)
4241 			return ret_code;
4242 	}
4243 	ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
4244 
4245 	return 0;
4246 }
4247 
4248 /**
4249  * irdma_sc_ccq_destroy - destroy ccq during close
4250  * @ccq: ccq sc struct
4251  * @scratch: u64 saved to be used during cqp completion
4252  * @post_sq: flag for cqp db to ring
4253  */
4254 int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
4255 {
4256 	struct irdma_sc_cqp *cqp;
4257 	__le64 *wqe;
4258 	u64 hdr;
4259 	int ret_code = 0;
4260 	u32 tail, val, error;
4261 
4262 	cqp = ccq->dev->cqp;
4263 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4264 	if (!wqe)
4265 		return -ENOMEM;
4266 
4267 	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
4268 	set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
4269 	set_64bit_val(wqe, 40, ccq->shadow_area_pa);
4270 
4271 	hdr = ccq->cq_uk.cq_id |
4272 	      FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
4273 			IRDMA_CQPSQ_CQ_CEQID) |
4274 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
4275 	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
4276 	      FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
4277 	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
4278 	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
4279 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4280 	dma_wmb(); /* make sure WQE is written before valid bit is set */
4281 
4282 	set_64bit_val(wqe, 24, hdr);
4283 
4284 	print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
4285 			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
4286 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4287 
4288 	if (post_sq) {
4289 		irdma_sc_cqp_post_sq(cqp);
4290 		ret_code = irdma_cqp_poll_registers(cqp, tail,
4291 						    cqp->dev->hw_attrs.max_done_count);
4292 	}
4293 
4294 	cqp->process_cqp_sds = irdma_update_sds_noccq;
4295 
4296 	return ret_code;
4297 }
4298 
4299 /**
4300  * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
4301  * @dev: ptr to irdma_dev struct
4302  * @hmc_fn_id: hmc function id
4303  */
4304 int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
4305 {
4306 	struct irdma_hmc_info *hmc_info;
4307 	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
4308 	struct irdma_dma_mem query_fpm_mem;
4309 	int ret_code = 0;
4310 	u8 wait_type;
4311 
4312 	hmc_info = dev->hmc_info;
4313 	hmc_fpm_misc = &dev->hmc_fpm_misc;
4314 	query_fpm_mem.pa = dev->fpm_query_buf_pa;
4315 	query_fpm_mem.va = dev->fpm_query_buf;
4316 	hmc_info->hmc_fn_id = hmc_fn_id;
4317 	wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
4318 
4319 	ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
4320 					  &query_fpm_mem, true, wait_type);
4321 	if (ret_code)
4322 		return ret_code;
4323 
4324 	/* parse the fpm_query_buf and fill hmc obj info */
4325 	ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
4326 						hmc_fpm_misc);
4327 
4328 	print_hex_dump_debug("HMC: QUERY FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
4329 			     8, query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE,
4330 			     false);
4331 	return ret_code;
4332 }
4333 
4334 /**
4335  * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
4336  * command and populates fpm base address in hmc_info
4337  * @dev: ptr to irdma_dev struct
4338  * @hmc_fn_id: hmc function id
4339  */
4340 static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
4341 {
4342 	struct irdma_hmc_info *hmc_info;
4343 	struct irdma_hmc_obj_info *obj_info;
4344 	__le64 *buf;
4345 	struct irdma_dma_mem commit_fpm_mem;
4346 	int ret_code = 0;
4347 	u8 wait_type;
4348 
4349 	hmc_info = dev->hmc_info;
4350 	obj_info = hmc_info->hmc_obj;
4351 	buf = dev->fpm_commit_buf;
4352 
4353 	set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
4354 	set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
4355 	set_64bit_val(buf, 16, (u64)0); /* RSRVD */
4356 	set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
4357 	set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
4358 	set_64bit_val(buf, 40, (u64)0); /* RSVD */
4359 	set_64bit_val(buf, 48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
4360 	set_64bit_val(buf, 56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
4361 	set_64bit_val(buf, 64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
4362 	set_64bit_val(buf, 72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
4363 	set_64bit_val(buf, 80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
4364 	set_64bit_val(buf, 88,
4365 		      (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
4366 	set_64bit_val(buf, 96,
4367 		      (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
4368 	set_64bit_val(buf, 104,
4369 		      (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
4370 	set_64bit_val(buf, 112,
4371 		      (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
4372 	set_64bit_val(buf, 120, (u64)0); /* RSVD */
4373 	set_64bit_val(buf, 128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
4374 	set_64bit_val(buf, 136,
4375 		      (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
4376 	set_64bit_val(buf, 144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
4377 	set_64bit_val(buf, 152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
4378 	set_64bit_val(buf, 160,
4379 		      (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
4380 	set_64bit_val(buf, 168,
4381 		      (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
4382 
4383 	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
4384 	commit_fpm_mem.va = dev->fpm_commit_buf;
4385 
4386 	wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
4387 	print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
4388 			     8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
4389 			     false);
4390 	ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
4391 					   &commit_fpm_mem, true, wait_type);
4392 	if (!ret_code)
4393 		irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
4394 					      hmc_info->hmc_obj,
4395 					      &hmc_info->sd_table.sd_cnt);
4396 	print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
4397 			     8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
4398 			     false);
4399 
4400 	return ret_code;
4401 }
4402 
4403 /**
4404  * cqp_sds_wqe_fill - fill cqp wqe for sd
4405  * @cqp: struct for cqp hw
4406  * @info: sd info for wqe
4407  * @scratch: u64 saved to be used during cqp completion
4408  */
4409 static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
4410 			    struct irdma_update_sds_info *info, u64 scratch)
4411 {
4412 	u64 data;
4413 	u64 hdr;
4414 	__le64 *wqe;
4415 	int mem_entries, wqe_entries;
4416 	struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
4417 	u64 offset = 0;
4418 	u32 wqe_idx;
4419 
4420 	wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
4421 	if (!wqe)
4422 		return -ENOMEM;
4423 
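	/*
	 * Up to three SD entries travel inline in the WQE itself; any
	 * remainder is staged in this WQE's slice of the sdbuf DMA area
	 * (16 bytes per entry, hence the "<< 4" below) and referenced by
	 * physical address at byte offset 16 of the WQE.
	 */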
4424 	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
4425 	mem_entries = info->cnt - wqe_entries;
4426 
4427 	if (mem_entries) {
4428 		offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
4429 		memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
4430 
4431 		data = (u64)sdbuf->pa + offset;
4432 	} else {
4433 		data = 0;
4434 	}
4435 	data |= FIELD_PREP(IRDMA_CQPSQ_UPESD_HMCFNID, info->hmc_fn_id);
4436 	set_64bit_val(wqe, 16, data);
4437 
4438 	switch (wqe_entries) {
4439 	case 3:
4440 		set_64bit_val(wqe, 48,
4441 			      (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
4442 			       FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
4443 
4444 		set_64bit_val(wqe, 56, info->entry[2].data);
4445 		fallthrough;
4446 	case 2:
4447 		set_64bit_val(wqe, 32,
4448 			      (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
4449 			       FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
4450 
4451 		set_64bit_val(wqe, 40, info->entry[1].data);
4452 		fallthrough;
4453 	case 1:
4454 		set_64bit_val(wqe, 0,
4455 			      FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
4456 
4457 		set_64bit_val(wqe, 8, info->entry[0].data);
4458 		break;
4459 	default:
4460 		break;
4461 	}
4462 
4463 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
4464 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
4465 	      FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
4466 	dma_wmb(); /* make sure WQE is written before valid bit is set */
4467 
4468 	set_64bit_val(wqe, 24, hdr);
4469 
4470 	if (mem_entries)
4471 		print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE Buffer",
4472 				     DUMP_PREFIX_OFFSET, 16, 8,
4473 				     (char *)sdbuf->va + offset,
4474 				     mem_entries << 4, false);
4475 
4476 	print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE", DUMP_PREFIX_OFFSET, 16,
4477 			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
4478 
4479 	return 0;
4480 }
4481 
4482 /**
4483  * irdma_update_pe_sds - cqp wqe for sd
4484  * @dev: ptr to irdma_dev struct
4485  * @info: sd info for sd's
4486  * @scratch: u64 saved to be used during cqp completion
4487  */
4488 static int irdma_update_pe_sds(struct irdma_sc_dev *dev,
4489 			       struct irdma_update_sds_info *info, u64 scratch)
4490 {
4491 	struct irdma_sc_cqp *cqp = dev->cqp;
4492 	int ret_code;
4493 
4494 	ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
4495 	if (!ret_code)
4496 		irdma_sc_cqp_post_sq(cqp);
4497 
4498 	return ret_code;
4499 }
4500 
4501 /**
4502  * irdma_update_sds_noccq - update sd before ccq created
4503  * @dev: sc device struct
4504  * @info: sd info for sd's
4505  */
4506 int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
4507 			   struct irdma_update_sds_info *info)
4508 {
4509 	u32 error, val, tail;
4510 	struct irdma_sc_cqp *cqp = dev->cqp;
4511 	int ret_code;
4512 
4513 	ret_code = cqp_sds_wqe_fill(cqp, info, 0);
4514 	if (ret_code)
4515 		return ret_code;
4516 
4517 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4518 
4519 	irdma_sc_cqp_post_sq(cqp);
4520 	return irdma_cqp_poll_registers(cqp, tail,
4521 					cqp->dev->hw_attrs.max_done_count);
4522 }
4523 
4524 /**
4525  * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
4526  * @cqp: struct for cqp hw
4527  * @scratch: u64 saved to be used during cqp completion
4528  * @hmc_fn_id: hmc function id
4529  * @post_sq: flag for cqp db to ring
4530  * @poll_registers: flag to poll register for cqp completion
4531  */
4532 int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
4533 					u8 hmc_fn_id, bool post_sq,
4534 					bool poll_registers)
4535 {
4536 	u64 hdr;
4537 	__le64 *wqe;
4538 	u32 tail, val, error;
4539 
4540 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4541 	if (!wqe)
4542 		return -ENOMEM;
4543 
4544 	set_64bit_val(wqe, 16,
4545 		      FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
4546 
4547 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
4548 			 IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
4549 	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4550 	dma_wmb(); /* make sure WQE is written before valid bit is set */
4551 
4552 	set_64bit_val(wqe, 24, hdr);
4553 
4554 	print_hex_dump_debug("WQE: SHMC_PAGES_ALLOCATED WQE",
4555 			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
4556 			     IRDMA_CQP_WQE_SIZE * 8, false);
4557 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4558 
4559 	if (post_sq) {
4560 		irdma_sc_cqp_post_sq(cqp);
4561 		if (poll_registers)
4562 			/* check for cqp sq tail update */
4563 			return irdma_cqp_poll_registers(cqp, tail,
4564 							cqp->dev->hw_attrs.max_done_count);
4565 		else
4566 			return irdma_sc_poll_for_cqp_op_done(cqp,
4567 							     IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
4568 							     NULL);
4569 	}
4570 
4571 	return 0;
4572 }
4573 
4574 /**
4575  * irdma_cqp_ring_full - check if cqp ring is full
4576  * @cqp: struct for cqp hw
4577  */
4578 static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
4579 {
4580 	return IRDMA_RING_FULL_ERR(cqp->sq_ring);
4581 }
4582 
4583 /**
4584  * irdma_est_sd - returns approximate number of SDs for HMC
4585  * @dev: sc device struct
4586  * @hmc_info: hmc structure, size and count for HMC objects
4587  */
4588 static u32 irdma_est_sd(struct irdma_sc_dev *dev,
4589 			struct irdma_hmc_info *hmc_info)
4590 {
4591 	int i;
4592 	u64 size = 0;
4593 	u64 sd;
4594 
4595 	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
4596 		if (i != IRDMA_HMC_IW_PBLE)
4597 			size += round_up(hmc_info->hmc_obj[i].cnt *
4598 					 hmc_info->hmc_obj[i].size, 512);
4599 	size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
4600 			 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
4601 	if (size & 0x1FFFFF)
4602 		sd = (size >> 21) + 1; /* add 1 for remainder */
4603 	else
4604 		sd = size >> 21;
4605 	if (sd > 0xFFFFFFFF) {
4606 		ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
4607 		sd = 0xFFFFFFFF - 1;
4608 	}
4609 
4610 	return (u32)sd;
4611 }
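
/*
 * Each segment descriptor (SD) maps 2MB (1 << 21) of backing memory, so
 * the estimate above is the total object size divided by 2MB, rounded
 * up: for example, 100MB of HMC objects needs exactly 50 SDs, while
 * 101MB needs 51 because the partial final segment still consumes a
 * whole SD.
 */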
4612 
4613 /**
4614  * irdma_sc_query_rdma_features_done - poll cqp for query features done
4615  * @cqp: struct for cqp hw
4616  */
4617 static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
4618 {
4619 	return irdma_sc_poll_for_cqp_op_done(cqp,
4620 					     IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
4621 					     NULL);
4622 }
4623 
4624 /**
4625  * irdma_sc_query_rdma_features - query RDMA features and FW ver
4626  * @cqp: struct for cqp hw
4627  * @buf: buffer to hold query info
4628  * @scratch: u64 saved to be used during cqp completion
4629  */
4630 static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
4631 					struct irdma_dma_mem *buf, u64 scratch)
4632 {
4633 	__le64 *wqe;
4634 	u64 temp;
4635 
4636 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4637 	if (!wqe)
4638 		return -ENOMEM;
4639 
4640 	temp = buf->pa;
4641 	set_64bit_val(wqe, 32, temp);
4642 
4643 	temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
4644 			  cqp->polarity) |
4645 	       FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
4646 	       FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
4647 	dma_wmb(); /* make sure WQE is written before valid bit is set */
4648 
4649 	set_64bit_val(wqe, 24, temp);
4650 
4651 	print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
4652 			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
4653 	irdma_sc_cqp_post_sq(cqp);
4654 
4655 	return 0;
4656 }
4657 
4658 /**
4659  * irdma_get_rdma_features - get RDMA features
4660  * @dev: sc device struct
4661  */
4662 int irdma_get_rdma_features(struct irdma_sc_dev *dev)
4663 {
4664 	int ret_code;
4665 	struct irdma_dma_mem feat_buf;
4666 	u64 temp;
4667 	u16 byte_idx, feat_type, feat_cnt, feat_idx;
4668 
4669 	feat_buf.size = ALIGN(IRDMA_FEATURE_BUF_SIZE,
4670 			      IRDMA_FEATURE_BUF_ALIGNMENT);
4671 	feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size,
4672 					 &feat_buf.pa, GFP_KERNEL);
4673 	if (!feat_buf.va)
4674 		return -ENOMEM;
4675 
4676 	ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
4677 	if (!ret_code)
4678 		ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
4679 	if (ret_code)
4680 		goto exit;
4681 
4682 	get_64bit_val(feat_buf.va, 0, &temp);
4683 	feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
4684 	if (feat_cnt < 2) {
4685 		ret_code = -EINVAL;
4686 		goto exit;
4687 	} else if (feat_cnt > IRDMA_MAX_FEATURES) {
4688 		ibdev_dbg(to_ibdev(dev),
4689 			  "DEV: feature buf size insufficient, retrying with larger buffer\n");
4690 		dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
4691 				  feat_buf.pa);
4692 		feat_buf.va = NULL;
4693 		feat_buf.size = ALIGN(8 * feat_cnt,
4694 				      IRDMA_FEATURE_BUF_ALIGNMENT);
4695 		feat_buf.va = dma_alloc_coherent(dev->hw->device,
4696 						 feat_buf.size, &feat_buf.pa,
4697 						 GFP_KERNEL);
4698 		if (!feat_buf.va)
4699 			return -ENOMEM;
4700 
4701 		ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
4702 		if (!ret_code)
4703 			ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
4704 		if (ret_code)
4705 			goto exit;
4706 
4707 		get_64bit_val(feat_buf.va, 0, &temp);
4708 		feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
4709 		if (feat_cnt < 2) {
4710 			ret_code = -EINVAL;
4711 			goto exit;
4712 		}
4713 	}
4714 
4715 	print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
4716 			     16, 8, feat_buf.va, feat_cnt * 8, false);
4717 
4718 	for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
4719 	     feat_idx++, byte_idx += 8) {
4720 		get_64bit_val(feat_buf.va, byte_idx, &temp);
4721 		feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
4722 		if (feat_type >= IRDMA_MAX_FEATURES) {
4723 			ibdev_dbg(to_ibdev(dev),
4724 				  "DEV: found unrecognized feature type %d\n",
4725 				  feat_type);
4726 			continue;
4727 		}
4728 		dev->feature_info[feat_type] = temp;
4729 	}
4730 exit:
4731 	dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
4732 			  feat_buf.pa);
4733 	feat_buf.va = NULL;
4734 	return ret_code;
4735 }
4736 
4737 static u32 irdma_q1_cnt(struct irdma_sc_dev *dev,
4738 			struct irdma_hmc_info *hmc_info, u32 qpwanted)
4739 {
4740 	u32 q1_cnt;
4741 
4742 	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
4743 		q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
4744 	} else {
4745 		if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
4746 			q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
4747 		else
4748 			q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
4749 	}
4750 
4751 	return q1_cnt;
4752 }
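
/*
 * Worked example with hypothetical numbers: on a GEN_2 device using a
 * RoCEv2 profile with max_hw_ird = 64 and qpwanted = 1024, the Q1 count
 * is roundup_pow_of_two(64 * 2 * 1024 + 512) = 262144 entries.
 */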
4753 
4754 static void cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
4755 				struct irdma_hmc_info *hmc_info, u32 qpwanted)
4756 {
4757 	hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
4758 }
4759 
4760 static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
4761 				struct irdma_hmc_info *hmc_info, u32 qpwanted)
4762 {
4763 	struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;
4764 
4765 	hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
4766 		4 * hmc_fpm_misc->xf_block_size * qpwanted;
4767 
4768 	hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
4769 
4770 	if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
4771 		hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
4772 	if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
4773 		hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
4774 			hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
4775 			hmc_fpm_misc->rrf_block_size;
4776 	if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
4777 		hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
4778 	if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
4779 		hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
4780 			hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
4781 			hmc_fpm_misc->ooiscf_block_size;
4782 }
4783 
4784 /**
4785  * irdma_cfg_fpm_val - configure HMC objects
4786  * @dev: sc device struct
4787  * @qp_count: desired qp count
4788  */
4789 int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
4790 {
4791 	struct irdma_virt_mem virt_mem;
4792 	u32 i, mem_size;
4793 	u32 qpwanted, mrwanted, pblewanted;
4794 	u32 powerof2, hte;
4795 	u32 sd_needed;
4796 	u32 sd_diff;
4797 	u32 loop_count = 0;
4798 	struct irdma_hmc_info *hmc_info;
4799 	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
4800 	int ret_code = 0;
4801 
4802 	hmc_info = dev->hmc_info;
4803 	hmc_fpm_misc = &dev->hmc_fpm_misc;
4804 
4805 	ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
4806 	if (ret_code) {
4807 		ibdev_dbg(to_ibdev(dev),
4808 			  "HMC: irdma_sc_init_iw_hmc returned error_code = %d\n",
4809 			  ret_code);
4810 		return ret_code;
4811 	}
4812 
4813 	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
4814 		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
4815 	sd_needed = irdma_est_sd(dev, hmc_info);
4816 	ibdev_dbg(to_ibdev(dev),
4817 		  "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n",
4818 		  sd_needed, hmc_info->first_sd_index);
4819 	ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n",
4820 		  hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds);
4821 
4822 	qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
4823 
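	/* round qpwanted down to the largest power of two it contains */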
4824 	powerof2 = 1;
4825 	while (powerof2 <= qpwanted)
4826 		powerof2 *= 2;
4827 	powerof2 /= 2;
4828 	qpwanted = powerof2;
4829 
4830 	mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
4831 	pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
4832 
4833 	ibdev_dbg(to_ibdev(dev),
4834 		  "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
4835 		  qp_count, hmc_fpm_misc->max_sds,
4836 		  hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
4837 		  hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
4838 		  hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
4839 		  hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
4840 		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
4841 		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
4842 	hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
4843 		hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
4844 	hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
4845 		hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
4846 	hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
4847 		hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
4848 
4849 	hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
4850 
4851 	while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
4852 		qpwanted /= 2;
4853 
4854 	do {
4855 		++loop_count;
4856 		hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
4857 		hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
4858 			min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
4859 		hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */
4860 		hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
4861 
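		/* HTE count: next power of two above (QPs + MCGs), times ht_multiplier */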
4862 		hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
4863 		powerof2 = 1;
4864 		while (powerof2 < hte)
4865 			powerof2 *= 2;
4866 		hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
4867 			powerof2 * hmc_fpm_misc->ht_multiplier;
4868 		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
4869 			cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
4870 		else
4871 			cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);
4872 
4873 		hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
4874 		hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
4875 			hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
4876 		hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
4877 			hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
4878 		hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
4879 			(round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;
4880 
4881 		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
4882 		sd_needed = irdma_est_sd(dev, hmc_info);
4883 		ibdev_dbg(to_ibdev(dev),
4884 			  "HMC: sd_needed = %d, hmc_fpm_misc->max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
4885 			  sd_needed, hmc_fpm_misc->max_sds, mrwanted,
4886 			  pblewanted, qpwanted);
4887 
4888 		/* Do not reduce resources further. All objects fit with max SDs */
4889 		if (sd_needed <= hmc_fpm_misc->max_sds)
4890 			break;
4891 
4892 		sd_diff = sd_needed - hmc_fpm_misc->max_sds;
4893 		if (sd_diff > 128) {
4894 			if (!(loop_count % 2) && qpwanted > 128) {
4895 				qpwanted /= 2;
4896 			} else {
4897 				mrwanted /= 2;
4898 				pblewanted /= 2;
4899 			}
4900 			continue;
4901 		}
4902 		if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
4903 		    pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
4904 			pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
4905 			continue;
4906 		} else if (pblewanted > (100 * FPM_MULTIPLIER)) {
4907 			pblewanted -= 10 * FPM_MULTIPLIER;
4908 		} else if (pblewanted > FPM_MULTIPLIER) {
4909 			pblewanted -= FPM_MULTIPLIER;
4910 		} else if (qpwanted <= 128) {
4911 			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
4912 				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
4913 			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
4914 				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
4915 		}
4916 		if (mrwanted > FPM_MULTIPLIER)
4917 			mrwanted -= FPM_MULTIPLIER;
4918 		if (!(loop_count % 10) && qpwanted > 128) {
4919 			qpwanted /= 2;
4920 			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
4921 				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
4922 		}
4923 	} while (loop_count < 2000);
4924 
4925 	if (sd_needed > hmc_fpm_misc->max_sds) {
4926 		ibdev_dbg(to_ibdev(dev),
4927 			  "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
4928 			  loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
4929 		return -EINVAL;
4930 	}
4931 
4932 	if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
4933 		pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 *
4934 			      FPM_MULTIPLIER;
4935 		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
4936 		sd_needed = irdma_est_sd(dev, hmc_info);
4937 	}
4938 
4939 	ibdev_dbg(to_ibdev(dev),
4940 		  "HMC: loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
4941 		  loop_count, sd_needed,
4942 		  hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
4943 		  hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
4944 		  hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
4945 		  hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
4946 		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
4947 		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
4948 		  hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);
4949 
4950 	ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
4951 	if (ret_code) {
4952 		ibdev_dbg(to_ibdev(dev),
4953 			  "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
4954 			  readl(dev->hw_regs[IRDMA_CQPERRCODES]));
4955 		return ret_code;
4956 	}
4957 
4958 	mem_size = sizeof(struct irdma_hmc_sd_entry) *
4959 		   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
4960 	virt_mem.size = mem_size;
4961 	virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
4962 	if (!virt_mem.va) {
4963 		ibdev_dbg(to_ibdev(dev),
4964 			  "HMC: failed to allocate memory for sd_entry buffer\n");
4965 		return -ENOMEM;
4966 	}
4967 	hmc_info->sd_table.sd_entry = virt_mem.va;
4968 
4969 	return ret_code;
4970 }
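
/*
 * Minimal usage sketch (illustrative, not part of the driver): retry with
 * a smaller QP count when the requested configuration cannot fit in the
 * available segment descriptors. The halving retry policy is an
 * assumption; only the irdma_cfg_fpm_val() call itself comes from above.
 */
static int example_cfg_fpm(struct irdma_sc_dev *dev, u32 max_qp)
{
	int status;

	for (; max_qp >= 2; max_qp /= 2) {
		status = irdma_cfg_fpm_val(dev, max_qp);
		if (status != -EINVAL)
			return status;	/* success or a hard failure */
	}

	return -EINVAL;
}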
4971 
4972 /**
4973  * irdma_exec_cqp_cmd - execute a cqp command when wqes are available
4974  * @dev: rdma device
4975  * @pcmdinfo: cqp command info
4976  */
4977 static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
4978 			      struct cqp_cmds_info *pcmdinfo)
4979 {
4980 	int status;
4981 	struct irdma_dma_mem val_mem;
4982 	bool alloc = false;
4983 
4984 	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
4985 	switch (pcmdinfo->cqp_cmd) {
4986 	case IRDMA_OP_CEQ_DESTROY:
4987 		status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
4988 					      pcmdinfo->in.u.ceq_destroy.scratch,
4989 					      pcmdinfo->post_sq);
4990 		break;
4991 	case IRDMA_OP_AEQ_DESTROY:
4992 		status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
4993 					      pcmdinfo->in.u.aeq_destroy.scratch,
4994 					      pcmdinfo->post_sq);
4995 
4996 		break;
4997 	case IRDMA_OP_CEQ_CREATE:
4998 		status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
4999 					     pcmdinfo->in.u.ceq_create.scratch,
5000 					     pcmdinfo->post_sq);
5001 		break;
5002 	case IRDMA_OP_AEQ_CREATE:
5003 		status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
5004 					     pcmdinfo->in.u.aeq_create.scratch,
5005 					     pcmdinfo->post_sq);
5006 		break;
5007 	case IRDMA_OP_QP_UPLOAD_CONTEXT:
5008 		status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
5009 						    &pcmdinfo->in.u.qp_upload_context.info,
5010 						    pcmdinfo->in.u.qp_upload_context.scratch,
5011 						    pcmdinfo->post_sq);
5012 		break;
5013 	case IRDMA_OP_CQ_CREATE:
5014 		status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
5015 					    pcmdinfo->in.u.cq_create.scratch,
5016 					    pcmdinfo->in.u.cq_create.check_overflow,
5017 					    pcmdinfo->post_sq);
5018 		break;
5019 	case IRDMA_OP_CQ_MODIFY:
5020 		status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
5021 					    &pcmdinfo->in.u.cq_modify.info,
5022 					    pcmdinfo->in.u.cq_modify.scratch,
5023 					    pcmdinfo->post_sq);
5024 		break;
5025 	case IRDMA_OP_CQ_DESTROY:
5026 		status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
5027 					     pcmdinfo->in.u.cq_destroy.scratch,
5028 					     pcmdinfo->post_sq);
5029 		break;
5030 	case IRDMA_OP_QP_FLUSH_WQES:
5031 		status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
5032 						&pcmdinfo->in.u.qp_flush_wqes.info,
5033 						pcmdinfo->in.u.qp_flush_wqes.scratch,
5034 						pcmdinfo->post_sq);
5035 		break;
5036 	case IRDMA_OP_GEN_AE:
5037 		status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
5038 					 &pcmdinfo->in.u.gen_ae.info,
5039 					 pcmdinfo->in.u.gen_ae.scratch,
5040 					 pcmdinfo->post_sq);
5041 		break;
5042 	case IRDMA_OP_MANAGE_PUSH_PAGE:
5043 		status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
5044 						   &pcmdinfo->in.u.manage_push_page.info,
5045 						   pcmdinfo->in.u.manage_push_page.scratch,
5046 						   pcmdinfo->post_sq);
5047 		break;
5048 	case IRDMA_OP_UPDATE_PE_SDS:
5049 		status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
5050 					     &pcmdinfo->in.u.update_pe_sds.info,
5051 					     pcmdinfo->in.u.update_pe_sds.scratch);
5052 		break;
5053 	case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
5054 		/* switch to calling through the call table */
5055 		status =
5056 			irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
5057 							  &pcmdinfo->in.u.manage_hmc_pm.info,
5058 							  pcmdinfo->in.u.manage_hmc_pm.scratch,
5059 							  true);
5060 		break;
5061 	case IRDMA_OP_SUSPEND:
5062 		status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
5063 					     pcmdinfo->in.u.suspend_resume.qp,
5064 					     pcmdinfo->in.u.suspend_resume.scratch);
5065 		break;
5066 	case IRDMA_OP_RESUME:
5067 		status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
5068 					    pcmdinfo->in.u.suspend_resume.qp,
5069 					    pcmdinfo->in.u.suspend_resume.scratch);
5070 		break;
5071 	case IRDMA_OP_QUERY_FPM_VAL:
5072 		val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
5073 		val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
5074 		status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
5075 						pcmdinfo->in.u.query_fpm_val.scratch,
5076 						pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
5077 						&val_mem, true, IRDMA_CQP_WAIT_EVENT);
5078 		break;
5079 	case IRDMA_OP_COMMIT_FPM_VAL:
5080 		val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
5081 		val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
5082 		status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
5083 						 pcmdinfo->in.u.commit_fpm_val.scratch,
5084 						 pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
5085 						 &val_mem,
5086 						 true,
5087 						 IRDMA_CQP_WAIT_EVENT);
5088 		break;
5089 	case IRDMA_OP_STATS_ALLOCATE:
5090 		alloc = true;
5091 		fallthrough;
5092 	case IRDMA_OP_STATS_FREE:
5093 		status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
5094 						    &pcmdinfo->in.u.stats_manage.info,
5095 						    alloc,
5096 						    pcmdinfo->in.u.stats_manage.scratch);
5097 		break;
5098 	case IRDMA_OP_STATS_GATHER:
5099 		status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
5100 					       &pcmdinfo->in.u.stats_gather.info,
5101 					       pcmdinfo->in.u.stats_gather.scratch);
5102 		break;
5103 	case IRDMA_OP_WS_MODIFY_NODE:
5104 		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5105 						 &pcmdinfo->in.u.ws_node.info,
5106 						 IRDMA_MODIFY_NODE,
5107 						 pcmdinfo->in.u.ws_node.scratch);
5108 		break;
5109 	case IRDMA_OP_WS_DELETE_NODE:
5110 		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5111 						 &pcmdinfo->in.u.ws_node.info,
5112 						 IRDMA_DEL_NODE,
5113 						 pcmdinfo->in.u.ws_node.scratch);
5114 		break;
5115 	case IRDMA_OP_WS_ADD_NODE:
5116 		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5117 						 &pcmdinfo->in.u.ws_node.info,
5118 						 IRDMA_ADD_NODE,
5119 						 pcmdinfo->in.u.ws_node.scratch);
5120 		break;
5121 	case IRDMA_OP_SET_UP_MAP:
5122 		status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
5123 					     &pcmdinfo->in.u.up_map.info,
5124 					     pcmdinfo->in.u.up_map.scratch);
5125 		break;
5126 	case IRDMA_OP_QUERY_RDMA_FEATURES:
5127 		status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
5128 						      &pcmdinfo->in.u.query_rdma.query_buff_mem,
5129 						      pcmdinfo->in.u.query_rdma.scratch);
5130 		break;
5131 	case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
5132 		status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
5133 						      pcmdinfo->in.u.del_arp_cache_entry.scratch,
5134 						      pcmdinfo->in.u.del_arp_cache_entry.arp_index,
5135 						      pcmdinfo->post_sq);
5136 		break;
5137 	case IRDMA_OP_MANAGE_APBVT_ENTRY:
5138 		status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
5139 						     &pcmdinfo->in.u.manage_apbvt_entry.info,
5140 						     pcmdinfo->in.u.manage_apbvt_entry.scratch,
5141 						     pcmdinfo->post_sq);
5142 		break;
5143 	case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
5144 		status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
5145 							   &pcmdinfo->in.u.manage_qhash_table_entry.info,
5146 							   pcmdinfo->in.u.manage_qhash_table_entry.scratch,
5147 							   pcmdinfo->post_sq);
5148 		break;
5149 	case IRDMA_OP_QP_MODIFY:
5150 		status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
5151 					    &pcmdinfo->in.u.qp_modify.info,
5152 					    pcmdinfo->in.u.qp_modify.scratch,
5153 					    pcmdinfo->post_sq);
5154 		break;
5155 	case IRDMA_OP_QP_CREATE:
5156 		status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
5157 					    &pcmdinfo->in.u.qp_create.info,
5158 					    pcmdinfo->in.u.qp_create.scratch,
5159 					    pcmdinfo->post_sq);
5160 		break;
5161 	case IRDMA_OP_QP_DESTROY:
5162 		status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
5163 					     pcmdinfo->in.u.qp_destroy.scratch,
5164 					     pcmdinfo->in.u.qp_destroy.remove_hash_idx,
5165 					     pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
5166 					     pcmdinfo->post_sq);
5167 		break;
5168 	case IRDMA_OP_ALLOC_STAG:
5169 		status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
5170 					     &pcmdinfo->in.u.alloc_stag.info,
5171 					     pcmdinfo->in.u.alloc_stag.scratch,
5172 					     pcmdinfo->post_sq);
5173 		break;
5174 	case IRDMA_OP_MR_REG_NON_SHARED:
5175 		status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
5176 						    &pcmdinfo->in.u.mr_reg_non_shared.info,
5177 						    pcmdinfo->in.u.mr_reg_non_shared.scratch,
5178 						    pcmdinfo->post_sq);
5179 		break;
5180 	case IRDMA_OP_DEALLOC_STAG:
5181 		status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
5182 					       &pcmdinfo->in.u.dealloc_stag.info,
5183 					       pcmdinfo->in.u.dealloc_stag.scratch,
5184 					       pcmdinfo->post_sq);
5185 		break;
5186 	case IRDMA_OP_MW_ALLOC:
5187 		status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
5188 					   &pcmdinfo->in.u.mw_alloc.info,
5189 					   pcmdinfo->in.u.mw_alloc.scratch,
5190 					   pcmdinfo->post_sq);
5191 		break;
5192 	case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
5193 		status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
5194 						      &pcmdinfo->in.u.add_arp_cache_entry.info,
5195 						      pcmdinfo->in.u.add_arp_cache_entry.scratch,
5196 						      pcmdinfo->post_sq);
5197 		break;
5198 	case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
5199 		status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
5200 							pcmdinfo->in.u.alloc_local_mac_entry.scratch,
5201 							pcmdinfo->post_sq);
5202 		break;
5203 	case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
5204 		status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
5205 						      &pcmdinfo->in.u.add_local_mac_entry.info,
5206 						      pcmdinfo->in.u.add_local_mac_entry.scratch,
5207 						      pcmdinfo->post_sq);
5208 		break;
5209 	case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
5210 		status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
5211 						      pcmdinfo->in.u.del_local_mac_entry.scratch,
5212 						      pcmdinfo->in.u.del_local_mac_entry.entry_idx,
5213 						      pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
5214 						      pcmdinfo->post_sq);
5215 		break;
5216 	case IRDMA_OP_AH_CREATE:
5217 		status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
5218 					    &pcmdinfo->in.u.ah_create.info,
5219 					    pcmdinfo->in.u.ah_create.scratch);
5220 		break;
5221 	case IRDMA_OP_AH_DESTROY:
5222 		status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
5223 					     &pcmdinfo->in.u.ah_destroy.info,
5224 					     pcmdinfo->in.u.ah_destroy.scratch);
5225 		break;
5226 	case IRDMA_OP_MC_CREATE:
5227 		status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
5228 						   &pcmdinfo->in.u.mc_create.info,
5229 						   pcmdinfo->in.u.mc_create.scratch);
5230 		break;
5231 	case IRDMA_OP_MC_DESTROY:
5232 		status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
5233 						    &pcmdinfo->in.u.mc_destroy.info,
5234 						    pcmdinfo->in.u.mc_destroy.scratch);
5235 		break;
5236 	case IRDMA_OP_MC_MODIFY:
5237 		status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
5238 						   &pcmdinfo->in.u.mc_modify.info,
5239 						   pcmdinfo->in.u.mc_modify.scratch);
5240 		break;
5241 	default:
5242 		status = -EOPNOTSUPP;
5243 		break;
5244 	}
5245 
5246 	return status;
5247 }
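
/*
 * Illustrative sketch (not driver code): packaging a CQP suspend request
 * for the dispatcher above. The union fields match cqp_cmds_info as used
 * in this file; reusing the qp pointer as the scratch cookie is an
 * assumption.
 */
static void example_build_suspend_cmd(struct cqp_cmds_info *pcmdinfo,
				      struct irdma_sc_cqp *cqp,
				      struct irdma_sc_qp *qp)
{
	memset(pcmdinfo, 0, sizeof(*pcmdinfo));
	pcmdinfo->cqp_cmd = IRDMA_OP_SUSPEND;
	pcmdinfo->post_sq = 1;
	pcmdinfo->in.u.suspend_resume.cqp = cqp;
	pcmdinfo->in.u.suspend_resume.qp = qp;
	pcmdinfo->in.u.suspend_resume.scratch = (uintptr_t)qp;
}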
5248 
5249 /**
5250  * irdma_process_cqp_cmd - process a cqp command now or queue it for later
5251  * @dev: sc device struct
5252  * @pcmdinfo: cqp command info
5253  */
5254 int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
5255 			  struct cqp_cmds_info *pcmdinfo)
5256 {
5257 	int status = 0;
5258 	unsigned long flags;
5259 
5260 	spin_lock_irqsave(&dev->cqp_lock, flags);
5261 	if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
5262 		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
5263 	else
5264 		list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
5265 	spin_unlock_irqrestore(&dev->cqp_lock, flags);
5266 	return status;
5267 }
5268 
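/*
 * Commands queued above sit on cqp_cmd_head until ring space frees up;
 * the CQP completion path is expected to call irdma_process_bh() to
 * drain that backlog (the exact call site lives outside this file).
 */
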
5269 /**
5270  * irdma_process_bh - drain queued cqp commands from the tasklet
5271  * @dev: sc device struct
5272  */
5273 int irdma_process_bh(struct irdma_sc_dev *dev)
5274 {
5275 	int status = 0;
5276 	struct cqp_cmds_info *pcmdinfo;
5277 	unsigned long flags;
5278 
5279 	spin_lock_irqsave(&dev->cqp_lock, flags);
5280 	while (!list_empty(&dev->cqp_cmd_head) &&
5281 	       !irdma_cqp_ring_full(dev->cqp)) {
5282 		pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
5283 		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
5284 		if (status)
5285 			break;
5286 	}
5287 	spin_unlock_irqrestore(&dev->cqp_lock, flags);
5288 	return status;
5289 }
5290 
5291 /**
5292  * irdma_cfg_aeq - Configure AEQ interrupt
5293  * @dev: pointer to the device structure
5294  * @idx: vector index
5295  * @enable: true to enable, false to disable
5296  */
5297 void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
5298 {
5299 	u32 reg_val;
5300 
5301 	reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
5302 		  FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
5303 		  FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, 3);
5304 	writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
5305 }
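
/*
 * Illustrative usage (the real call sites are in the interrupt setup
 * code outside this file): enable the AEQ cause on MSI-X vector 0 once
 * the AEQ is created, and mask it again before teardown.
 *
 *	irdma_cfg_aeq(dev, 0, true);
 *	...
 *	irdma_cfg_aeq(dev, 0, false);
 */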
5306 
5307 /**
5308  * sc_vsi_update_stats - Update VSI statistics from the latest gather buffers
5309  * @vsi: sc_vsi instance to update
5310  */
5311 void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
5312 {
5313 	struct irdma_gather_stats *gather_stats;
5314 	struct irdma_gather_stats *last_gather_stats;
5315 
5316 	gather_stats = vsi->pestat->gather_info.gather_stats_va;
5317 	last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
5318 	irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
5319 			   last_gather_stats, vsi->dev->hw_stats_map,
5320 			   vsi->dev->hw_attrs.max_stat_idx);
5321 }
5322 
5323 /**
5324  * irdma_wait_pe_ready - Poll until the PE firmware reports ready or timeout
5325  * @dev: provides access to registers
5326  */
5327 static int irdma_wait_pe_ready(struct irdma_sc_dev *dev)
5328 {
5329 	u32 statuscpu0;
5330 	u32 statuscpu1;
5331 	u32 statuscpu2;
5332 	u32 retrycount = 0;
5333 
5334 	do {
5335 		statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
5336 		statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
5337 		statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
5338 		if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
5339 		    statuscpu2 == 0x80)
5340 			return 0;
5341 		mdelay(1000);
5342 	} while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
5343 	return -ETIMEDOUT;
5344 }
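
/*
 * Each retry above busy-waits one second, so with the default
 * max_pe_ready_count of 14 set in irdma_sc_dev_init() the PE firmware
 * gets roughly 14 seconds to report 0x80 ("ready") on all three CPU
 * status registers before the driver gives up.
 */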
5345 
5346 static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
5347 {
5348 	switch (dev->hw_attrs.uk_attrs.hw_rev) {
5349 	case IRDMA_GEN_1:
5350 		i40iw_init_hw(dev);
5351 		break;
5352 	case IRDMA_GEN_2:
5353 		icrdma_init_hw(dev);
5354 		break;
5355 	}
5356 }
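
/*
 * GEN_1 corresponds to the i40iw (X722-era) register layout and GEN_2 to
 * the icrdma (E810-era) layout; each init_hw callback is expected to fill
 * dev->hw_regs and the generation-specific attributes.
 */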
5357 
5358 /**
5359  * irdma_sc_dev_init - Initialize control part of device
5360  * @ver: HW generation (IRDMA_GEN_1 or IRDMA_GEN_2)
5361  * @dev: Device pointer
5362  * @info: Device init info
5363  */
5364 int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
5365 		      struct irdma_device_init_info *info)
5366 {
5367 	u32 val;
5368 	int ret_code = 0;
5369 	u8 db_size;
5370 
5371 	INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
5372 	mutex_init(&dev->ws_mutex);
5373 	dev->hmc_fn_id = info->hmc_fn_id;
5374 	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
5375 	dev->fpm_query_buf = info->fpm_query_buf;
5376 	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
5377 	dev->fpm_commit_buf = info->fpm_commit_buf;
5378 	dev->hw = info->hw;
5379 	dev->hw->hw_addr = info->bar0;
5380 	/* Set up the hardware limits; HMC may limit further */
5381 	dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
5382 	dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
5383 	dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
5384 	dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
5385 	dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
5386 	dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
5387 	dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
5388 	dev->hw_attrs.uk_attrs.max_hw_wq_frags = IRDMA_MAX_WQ_FRAGMENT_COUNT;
5389 	dev->hw_attrs.uk_attrs.max_hw_read_sges = IRDMA_MAX_SGE_RD;
5390 	dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
5391 	dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
5392 	dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
5393 	dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
5394 	dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
5395 	dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
5396 	dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
5397 
5398 	dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
5399 	dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
5400 	dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
5401 	dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;
5402 
5403 	dev->hw_attrs.max_pe_ready_count = 14;
5404 	dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
5405 	dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
5406 	dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
5407 
5408 	dev->hw_attrs.uk_attrs.hw_rev = ver;
5409 	irdma_sc_init_hw(dev);
5410 
5411 	if (irdma_wait_pe_ready(dev))
5412 		return -ETIMEDOUT;
5413 
5414 	val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
5415 	db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
5416 	if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
5417 		ibdev_dbg(to_ibdev(dev),
5418 			  "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
5419 			  val, db_size);
5420 		return -ENODEV;
5421 	}
5422 	dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
5423 
5424 	return ret_code;
5425 }
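
/*
 * Minimal init sketch (illustrative; PCI probe and DMA buffer allocation
 * are omitted and the variable names are hypothetical). The caller fills
 * irdma_device_init_info with exactly the pieces consumed above:
 *
 *	info.hmc_fn_id = 0;
 *	info.fpm_query_buf = query_va;
 *	info.fpm_query_buf_pa = query_pa;
 *	info.fpm_commit_buf = commit_va;
 *	info.fpm_commit_buf_pa = commit_pa;
 *	info.hw = &rf_hw;
 *	info.bar0 = bar0_base;
 *	ret = irdma_sc_dev_init(IRDMA_GEN_2, &dev, &info);
 */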
5426 
5427 /**
5428  * irdma_stat_val - Extract HW counter value from statistics buffer
5429  * @stats_val: pointer to statistics buffer
5430  * @byteoff: byte offset of counter value in the buffer (8B-aligned)
5431  * @bitoff: bit offset of counter value within 8B entry
5432  * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter)
5433  */
5434 static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff, u8 bitoff,
5435 				 u64 bitmask)
5436 {
5437 	u16 idx = byteoff / sizeof(*stats_val);
5438 
5439 	return (stats_val[idx] >> bitoff) & bitmask;
5440 }
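
/*
 * Worked example: a 24-bit counter at byteoff = 24, bitoff = 8 is read
 * from stats_val[3] (24 / 8), shifted right 8 bits and masked with
 * 0xffffff, so several narrow counters can share one 64-bit gather entry.
 */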
5441 
5442 /**
5443  * irdma_stat_delta - Calculate counter delta
5444  * @new_val: updated counter value
5445  * @old_val: last counter value
5446  * @max_val: maximum counter value (e.g. 0xffffff for 24-bit counter)
5447  */
5448 static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val)
5449 {
5450 	if (new_val >= old_val)
5451 		return new_val - old_val;
5452 
5453 	/* roll-over case */
5454 	return max_val - old_val + new_val + 1;
5455 }
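
/*
 * Rollover example for a 24-bit counter (max_val = 0xffffff): if the
 * last sample was 0xfffff0 and the new sample is 0x10, the delta is
 * 0xffffff - 0xfffff0 + 0x10 + 1 = 0x20, i.e. 32 events rather than a
 * wrapped negative value.
 */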
5456 
5457 /**
5458  * irdma_update_stats - Update statistics
5459  * @hw_stats: hw_stats instance to update
5460  * @gather_stats: updated stat counters
5461  * @last_gather_stats: last stat counters
5462  * @map: HW stat map (hw_stats => gather_stats)
5463  * @max_stat_idx: number of HW stats
5464  */
5465 void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
5466 			struct irdma_gather_stats *gather_stats,
5467 			struct irdma_gather_stats *last_gather_stats,
5468 			const struct irdma_hw_stat_map *map, u16 max_stat_idx)
5469 {
5470 	u64 *stats_val = hw_stats->stats_val;
5471 	u16 i;
5472 
5473 	for (i = 0; i < max_stat_idx; i++) {
5474 		u64 new_val = irdma_stat_val(gather_stats->val, map[i].byteoff,
5475 					     map[i].bitoff, map[i].bitmask);
5476 		u64 last_val = irdma_stat_val(last_gather_stats->val,
5477 					      map[i].byteoff, map[i].bitoff,
5478 					      map[i].bitmask);
5479 
5480 		stats_val[i] +=
5481 			irdma_stat_delta(new_val, last_val, map[i].bitmask);
5482 	}
5483 
5484 	memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats));
5485 }
5486