xref: /freebsd/sys/dev/bnxt/bnxt_re/ib_verbs.c (revision 84d7ec4c657f406c6cbd29baf32c8e057b663d17)
1acd884deSSumit Saxena /*
2acd884deSSumit Saxena  * Copyright (c) 2015-2024, Broadcom. All rights reserved.  The term
3acd884deSSumit Saxena  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
4acd884deSSumit Saxena  *
5acd884deSSumit Saxena  * Redistribution and use in source and binary forms, with or without
6acd884deSSumit Saxena  * modification, are permitted provided that the following conditions
7acd884deSSumit Saxena  * are met:
8acd884deSSumit Saxena  *
9acd884deSSumit Saxena  * 1. Redistributions of source code must retain the above copyright
10acd884deSSumit Saxena  *    notice, this list of conditions and the following disclaimer.
11acd884deSSumit Saxena  * 2. Redistributions in binary form must reproduce the above copyright
12acd884deSSumit Saxena  *    notice, this list of conditions and the following disclaimer in
13acd884deSSumit Saxena  *    the documentation and/or other materials provided with the
14acd884deSSumit Saxena  *    distribution.
15acd884deSSumit Saxena  *
16acd884deSSumit Saxena  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
17acd884deSSumit Saxena  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
18acd884deSSumit Saxena  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19acd884deSSumit Saxena  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
20acd884deSSumit Saxena  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21acd884deSSumit Saxena  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22acd884deSSumit Saxena  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
23acd884deSSumit Saxena  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24acd884deSSumit Saxena  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
25acd884deSSumit Saxena  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
26acd884deSSumit Saxena  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27acd884deSSumit Saxena  *
28acd884deSSumit Saxena  * Description: IB Verbs interpreter
29acd884deSSumit Saxena  */
30acd884deSSumit Saxena 
31acd884deSSumit Saxena #include <linux/if_ether.h>
32acd884deSSumit Saxena #include <linux/etherdevice.h>
33acd884deSSumit Saxena #include <rdma/uverbs_ioctl.h>
34acd884deSSumit Saxena 
35acd884deSSumit Saxena #include "bnxt_re.h"
36acd884deSSumit Saxena #include "ib_verbs.h"
37acd884deSSumit Saxena 
38acd884deSSumit Saxena static inline
get_ib_umem_sgl(struct ib_umem * umem,u32 * nmap)39acd884deSSumit Saxena struct scatterlist *get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap)
40acd884deSSumit Saxena {
41acd884deSSumit Saxena 
42acd884deSSumit Saxena 	*nmap = umem->nmap;
43acd884deSSumit Saxena 	return umem->sg_head.sgl;
44acd884deSSumit Saxena }
45acd884deSSumit Saxena 
/* Thin wrapper around ib_umem_release() that adds a debug trace. */
static inline void bnxt_re_peer_mem_release(struct ib_umem *umem)
{
	dev_dbg(NULL, "ib_umem_release getting invoked \n");
	ib_umem_release(umem);
}
51acd884deSSumit Saxena 
bnxt_re_resolve_dmac_task(struct work_struct * work)52acd884deSSumit Saxena void bnxt_re_resolve_dmac_task(struct work_struct *work)
53acd884deSSumit Saxena {
54acd884deSSumit Saxena 	int rc = -1;
55acd884deSSumit Saxena 	struct bnxt_re_dev *rdev;
56acd884deSSumit Saxena 	struct ib_ah_attr	*ah_attr;
57acd884deSSumit Saxena 	struct bnxt_re_resolve_dmac_work *dmac_work =
58acd884deSSumit Saxena 			container_of(work, struct bnxt_re_resolve_dmac_work, work);
59acd884deSSumit Saxena 
60acd884deSSumit Saxena 	rdev = dmac_work->rdev;
61acd884deSSumit Saxena 	ah_attr = dmac_work->ah_attr;
62acd884deSSumit Saxena 	rc = ib_resolve_eth_dmac(&rdev->ibdev, ah_attr);
63acd884deSSumit Saxena 	if (rc)
64acd884deSSumit Saxena 		dev_err(rdev_to_dev(dmac_work->rdev),
65acd884deSSumit Saxena 			"Failed to resolve dest mac rc = %d\n", rc);
66acd884deSSumit Saxena 	atomic_set(&dmac_work->status_wait, rc << 8);
67acd884deSSumit Saxena }
68acd884deSSumit Saxena 
__from_ib_access_flags(int iflags)69acd884deSSumit Saxena static int __from_ib_access_flags(int iflags)
70acd884deSSumit Saxena {
71acd884deSSumit Saxena 	int qflags = 0;
72acd884deSSumit Saxena 
73acd884deSSumit Saxena 	if (iflags & IB_ACCESS_LOCAL_WRITE)
74acd884deSSumit Saxena 		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
75acd884deSSumit Saxena 	if (iflags & IB_ACCESS_REMOTE_READ)
76acd884deSSumit Saxena 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
77acd884deSSumit Saxena 	if (iflags & IB_ACCESS_REMOTE_WRITE)
78acd884deSSumit Saxena 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
79acd884deSSumit Saxena 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
80acd884deSSumit Saxena 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
81acd884deSSumit Saxena 	if (iflags & IB_ACCESS_MW_BIND)
82acd884deSSumit Saxena 		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
83acd884deSSumit Saxena 	if (iflags & IB_ZERO_BASED)
84acd884deSSumit Saxena 		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
85acd884deSSumit Saxena 	if (iflags & IB_ACCESS_ON_DEMAND)
86acd884deSSumit Saxena 		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
87acd884deSSumit Saxena 	return qflags;
88acd884deSSumit Saxena };
89acd884deSSumit Saxena 
__to_ib_access_flags(int qflags)90acd884deSSumit Saxena static enum ib_access_flags __to_ib_access_flags(int qflags)
91acd884deSSumit Saxena {
92acd884deSSumit Saxena 	enum ib_access_flags iflags = 0;
93acd884deSSumit Saxena 
94acd884deSSumit Saxena 	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
95acd884deSSumit Saxena 		iflags |= IB_ACCESS_LOCAL_WRITE;
96acd884deSSumit Saxena 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
97acd884deSSumit Saxena 		iflags |= IB_ACCESS_REMOTE_WRITE;
98acd884deSSumit Saxena 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
99acd884deSSumit Saxena 		iflags |= IB_ACCESS_REMOTE_READ;
100acd884deSSumit Saxena 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
101acd884deSSumit Saxena 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
102acd884deSSumit Saxena 	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
103acd884deSSumit Saxena 		iflags |= IB_ACCESS_MW_BIND;
104acd884deSSumit Saxena 	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
105acd884deSSumit Saxena 		iflags |= IB_ZERO_BASED;
106acd884deSSumit Saxena 	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
107acd884deSSumit Saxena 		iflags |= IB_ACCESS_ON_DEMAND;
108acd884deSSumit Saxena 	return iflags;
109acd884deSSumit Saxena };
110acd884deSSumit Saxena 
/*
 * Copy @len bytes of response data to the user-space buffer described
 * by @udata, logging the calling function's address on failure.
 */
static int bnxt_re_copy_to_udata(struct bnxt_re_dev *rdev, void *data,
				 int len, struct ib_udata *udata)
{
	int rc = ib_copy_to_udata(udata, data, len);

	if (rc)
		dev_err(rdev_to_dev(rdev),
			"ucontext copy failed from %ps rc %d\n",
			__builtin_return_address(0), rc);

	return rc;
}
124acd884deSSumit Saxena 
/*
 * bnxt_re_get_netdev - return the net device backing @ibdev with a
 * reference held, or NULL if the device is unavailable.  The caller is
 * responsible for releasing the reference.
 *
 * Simplified: the old code re-tested 'netdev' for NULL right after the
 * '!rdev->netdev' guard had already proven it non-NULL, via an
 * unnecessary goto.
 */
struct ifnet *bnxt_re_get_netdev(struct ib_device *ibdev,
				 u8 port_num)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct ifnet *netdev = NULL;

	rcu_read_lock();

	/* In case of active-backup bond mode, return active slave */
	if (rdev && rdev->netdev) {
		netdev = rdev->netdev;
		dev_hold(netdev);
	}

	rcu_read_unlock();
	return netdev;
}
146acd884deSSumit Saxena 
/*
 * bnxt_re_query_device - fill @ib_attr with the device capabilities
 * reported by the qplib layer (rdev->dev_attr) and the PCI identity of
 * the underlying function.  Always returns 0.
 *
 * NOTE(review): despite being a query, this also writes rdev state
 * (rdev->min_tx_depth, rdev->espeed is untouched here but min_tx_depth
 * is latched from the module parameter on first call).
 */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));

	/* fw_ver is four one-byte components packed into the u64 field */
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver, 4);
	bnxt_qplib_get_guid(rdev->dev_addr, (u8 *)&ib_attr->sys_image_guid);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = dev_attr->page_size_cap;
	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	/*
	 * Read and set from the module param 'min_tx_depth'
	 * only once after the driver load
	 */
	if (rdev->min_tx_depth == 1 &&
	    min_tx_depth < dev_attr->max_qp_wqes)
		rdev->min_tx_depth = min_tx_depth;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_LOCAL_DMA_LKEY
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->max_send_sge = dev_attr->max_qp_sges;
	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	/* Atomics advertised only when the hardware reports support */
	if (dev_attr->is_atomic) {
		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
	}
	/* EE/RD and multicast features are not supported by this device */
	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;
	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	ib_attr->sig_prot_cap = 0;
	ib_attr->sig_guard_cap = 0;
	ib_attr->odp_caps.general_caps = 0;

	return 0;
}
222acd884deSSumit Saxena 
bnxt_re_modify_device(struct ib_device * ibdev,int device_modify_mask,struct ib_device_modify * device_modify)223acd884deSSumit Saxena int bnxt_re_modify_device(struct ib_device *ibdev,
224acd884deSSumit Saxena 			  int device_modify_mask,
225acd884deSSumit Saxena 			  struct ib_device_modify *device_modify)
226acd884deSSumit Saxena {
227acd884deSSumit Saxena 	dev_dbg(rdev_to_dev(rdev), "Modify device with mask 0x%x\n",
228acd884deSSumit Saxena 		device_modify_mask);
229acd884deSSumit Saxena 
230acd884deSSumit Saxena 	switch (device_modify_mask) {
231acd884deSSumit Saxena 	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
232acd884deSSumit Saxena 		/* Modify the GUID requires the modification of the GID table */
233acd884deSSumit Saxena 		/* GUID should be made as READ-ONLY */
234acd884deSSumit Saxena 		break;
235acd884deSSumit Saxena 	case IB_DEVICE_MODIFY_NODE_DESC:
236acd884deSSumit Saxena 		/* Node Desc should be made as READ-ONLY */
237acd884deSSumit Saxena 		break;
238acd884deSSumit Saxena 	default:
239acd884deSSumit Saxena 		break;
240acd884deSSumit Saxena 	}
241acd884deSSumit Saxena 	return 0;
242acd884deSSumit Saxena }
243acd884deSSumit Saxena 
/*
 * __to_ib_speed_width - map an Ethernet link speed (Mbps, SPEED_*) to
 * the IB speed/width pair advertised through query_port.  Unrecognized
 * speeds fall back to SDR/1X, the same mapping used for 1G.
 */
static void __to_ib_speed_width(u32 espeed, u8 *speed, u8 *width)
{
	switch (espeed) {
	case SPEED_10000:
		*speed = IB_SPEED_QDR;
		*width = IB_WIDTH_1X;
		break;
	case SPEED_20000:
		*speed = IB_SPEED_DDR;
		*width = IB_WIDTH_4X;
		break;
	case SPEED_25000:
		*speed = IB_SPEED_EDR;
		*width = IB_WIDTH_1X;
		break;
	case SPEED_40000:
		*speed = IB_SPEED_QDR;
		*width = IB_WIDTH_4X;
		break;
	case SPEED_50000:
		*speed = IB_SPEED_EDR;
		*width = IB_WIDTH_2X;
		break;
	case SPEED_100000:
		*speed = IB_SPEED_EDR;
		*width = IB_WIDTH_4X;
		break;
	case SPEED_200000:
		*speed = IB_SPEED_HDR;
		*width = IB_WIDTH_4X;
		break;
	case SPEED_1000:	/* same mapping as the fallback */
	default:
		*speed = IB_SPEED_SDR;
		*width = IB_WIDTH_1X;
		break;
	}
}
285acd884deSSumit Saxena 
286acd884deSSumit Saxena /* Port */
/* Port */
/*
 * bnxt_re_query_port - fill @port_attr with link state, MTU, GID/pkey
 * table sizes and the negotiated speed/width.  Always returns 0.
 *
 * Side effect: refreshes rdev->espeed from the en_dev before the
 * speed/width translation.
 */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
	u8 active_speed = 0, active_width = 0;

	dev_dbg(rdev_to_dev(rdev), "QUERY PORT with port_num 0x%x\n", port_num);
	memset(port_attr, 0, sizeof(*port_attr));

	/* Physical state follows the logical link state */
	port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	port_attr->state = bnxt_re_get_link_state(rdev);
	if (port_attr->state == IB_PORT_ACTIVE)
		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(if_getmtu(rdev->netdev));
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP |
				    IB_PORT_IP_BASED_GIDS;

	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	/* LID/SM fields are meaningless on RoCE; report zeros */
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	rdev->espeed = rdev->en_dev->espeed;

	/* Speed/width are only meaningful once the ibdev is registered */
	if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
		__to_ib_speed_width(rdev->espeed, &active_speed,
				    &active_width);

	port_attr->active_speed = active_speed;
	port_attr->active_width = active_width;

	return 0;
}
331acd884deSSumit Saxena 
/*
 * bnxt_re_modify_port - stub; all port-modify masks are accepted and
 * ignored.  Always returns 0.
 *
 * Fix: the debug trace referenced 'rdev' without declaring it (only
 * compiled while dev_dbg() discarded its arguments); derive it from
 * @ibdev, matching bnxt_re_query_port() and the rest of this file.
 */
int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
			int port_modify_mask,
			struct ib_port_modify *port_modify)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	dev_dbg(rdev_to_dev(rdev), "Modify port with mask 0x%x\n",
		port_modify_mask);

	switch (port_modify_mask) {
	case IB_PORT_SHUTDOWN:
		break;
	case IB_PORT_INIT_TYPE:
		break;
	case IB_PORT_RESET_QKEY_CNTR:
		break;
	default:
		break;
	}
	return 0;
}
351acd884deSSumit Saxena 
/*
 * bnxt_re_get_port_immutable - report the immutable port properties:
 * table lengths from query_port plus the RoCE capability flags that
 * match the device's configured RoCE mode.
 */
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	switch (rdev->roce_mode) {
	case BNXT_RE_FLAG_ROCEV1_CAP:
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
		break;
	case BNXT_RE_FLAG_ROCEV2_CAP:
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
		break;
	default:
		/* Both v1 and v2 capable */
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
					    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
		break;
	}
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}
373acd884deSSumit Saxena 
/*
 * Format the firmware version as "major.minor.patch.build".
 *
 * NOTE(review): the body references 'ibdev' and 'str', which are not
 * visible in the '(void)' signature shown here -- presumably
 * bnxt_re_compat_qfwstr is a compat macro (from ib_verbs.h) that
 * expands to the real kernel-version-dependent signature supplying
 * them. Confirm the macro definition before touching this function.
 */
void bnxt_re_compat_qfwstr(void)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	sprintf(str, "%d.%d.%d.%d", rdev->dev_attr->fw_ver[0],
		rdev->dev_attr->fw_ver[1], rdev->dev_attr->fw_ver[2],
		rdev->dev_attr->fw_ver[3]);
}
382acd884deSSumit Saxena 
/*
 * bnxt_re_query_pkey - report the partition key at @index.
 *
 * Only one P_Key (the default full-membership key) is supported, so
 * any index other than zero is rejected with -EINVAL.
 */
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey)
{
	if (index != 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;

	return 0;
}
393acd884deSSumit Saxena 
/*
 * bnxt_re_query_gid - read the SGID table entry at @index into @gid.
 * Returns 0 on success or the qplib error code.
 */
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	return bnxt_qplib_get_sgid(&rdev->qplib_res,
				   &rdev->qplib_res.sgid_tbl, index,
				   (struct bnxt_qplib_gid *)gid);
}
407acd884deSSumit Saxena 
/*
 * bnxt_re_del_gid - remove the GID at stack index @index.
 *
 * @context carries the bnxt_re_gid_ctx allocated by bnxt_re_add_gid;
 * its refcount tracks how many times the stack added the same GID.
 * The hardware entry is only deleted (and the ctx freed) once the
 * refcount drops to zero.  GID0 gets special treatment while QP1 is
 * alive -- see the comment below.
 */
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;
	u16 vlan_id = 0xFFFF;	/* 0xFFFF == no VLAN tag */

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx) {
		dev_err(rdev_to_dev(rdev), "GID entry has no ctx?!\n");
		return -EINVAL;
	}
	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max) {
			dev_dbg(rdev_to_dev(rdev), "GID index out of range?!\n");
			return -EINVAL;
		}
		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
		ctx->refcnt--;
		/* DEL_GID is called via WQ context(netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    (rdev->gsi_ctx.gsi_sqp ||
		     rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)) {
			dev_dbg(rdev_to_dev(rdev),
				"Trying to delete GID0 while QP1 is alive\n");
			/* Skip the FW delete but still release the ctx once
			 * the last reference is gone.
			 */
			if (!ctx->refcnt) {
				rdev->gid_map[index] = -1;
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
			return 0;
		}
		rdev->gid_map[index] = -1;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
						 vlan_id, true);
			if (!rc) {
				dev_dbg(rdev_to_dev(rdev), "GID remove success\n");
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			} else {
				dev_err(rdev_to_dev(rdev),
					"Remove GID failed rc = 0x%x\n", rc);
			}
		}
	} else {
		dev_dbg(rdev_to_dev(rdev), "GID sgid_tbl does not exist!\n");
		return -EINVAL;
	}
	return rc;
}
474acd884deSSumit Saxena 
/*
 * bnxt_re_add_gid - program @gid into the hardware SGID table.
 *
 * On success a bnxt_re_gid_ctx is returned through @context; its
 * refcount counts duplicate adds of the same GID (qplib returns
 * -EALREADY for those, which is treated as success here).  The mapping
 * from the stack's @index to the hardware table index is recorded in
 * rdev->gid_map.
 */
int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, const union ib_gid *gid,
		    const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;	/* 0xFFFF == no VLAN tag */
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	if ((attr->ndev) && is_vlan_dev(attr->ndev))
		vlan_id = vlan_dev_vlan_id(attr->ndev);

	rc = bnxt_qplib_add_sgid(sgid_tbl, gid,
				 rdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		/* Duplicate add: bump (or create) the ctx refcount instead
		 * of programming the hardware again.
		 */
		dev_dbg(rdev_to_dev(rdev), "GID %pI6 is already present\n", gid);
		ctx_tbl = sgid_tbl->ctx;
		if (!ctx_tbl[tbl_idx]) {
			ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
			if (!ctx)
				return -ENOMEM;
			ctx->idx = tbl_idx;
			ctx->refcnt = 1;
			ctx_tbl[tbl_idx] = ctx;
		} else {
			ctx_tbl[tbl_idx]->refcnt++;
		}
		*context = ctx_tbl[tbl_idx];
		/* tbl_idx is the HW table index and index is the stack index */
		rdev->gid_map[index] = tbl_idx;
		return 0;
	} else if (rc < 0) {
		dev_err(rdev_to_dev(rdev), "Add GID failed rc = 0x%x\n", rc);
		return rc;
	} else {
		/* Fresh entry: allocate the tracking ctx for this HW slot */
		ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx) {
			dev_err(rdev_to_dev(rdev), "Add GID ctx failed\n");
			return -ENOMEM;
		}
		ctx_tbl = sgid_tbl->ctx;
		ctx->idx = tbl_idx;
		ctx->refcnt = 1;
		ctx_tbl[tbl_idx] = ctx;
		/* tbl_idx is the HW table index and index is the stack index */
		rdev->gid_map[index] = tbl_idx;
		*context = ctx;
	}
	return rc;
}
527acd884deSSumit Saxena 
/* RoCE devices always present an Ethernet link layer. */
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
533acd884deSSumit Saxena 
/*
 * bnxt_re_legacy_create_fence_wqe - pre-build the type-1 bind-MW WQE
 * used to implement the legacy fence operation for @pd.
 *
 * The template is stored in pd->fence.bind_wqe; only the rkey is
 * filled in later, at (re)bind time.  No-op on gen P5/P7 chips, which
 * do not need the legacy fence.
 */
static void bnxt_re_legacy_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_legacy_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
	struct bnxt_re_dev *rdev = pd->rdev;

	if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
		return;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}
561acd884deSSumit Saxena 
/*
 * bnxt_re_legacy_bind_fence_mw - post the pre-built fence bind-MW WQE
 * (see bnxt_re_legacy_create_fence_wqe) on @qplib_qp's send queue and
 * ring the doorbell.  The rkey is advanced after each use so every
 * bind gets a fresh key.  Returns 0 on success or the post-send error.
 */
static int bnxt_re_legacy_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
	struct bnxt_re_legacy_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	/* TODO: Need SQ locking here when Fence WQE
	 * posting moves up into bnxt_re from bnxt_qplib.
	 */
	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	dev_dbg(rdev_to_dev(qp->rdev),
		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}
592acd884deSSumit Saxena 
bnxt_re_legacy_create_fence_mr(struct bnxt_re_pd * pd)593acd884deSSumit Saxena static int bnxt_re_legacy_create_fence_mr(struct bnxt_re_pd *pd)
594acd884deSSumit Saxena {
595acd884deSSumit Saxena 	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
596acd884deSSumit Saxena 	struct bnxt_re_legacy_fence_data *fence = &pd->fence;
597acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = pd->rdev;
598acd884deSSumit Saxena 	struct bnxt_qplib_mrinfo mrinfo;
599acd884deSSumit Saxena 	struct bnxt_re_mr *mr = NULL;
600acd884deSSumit Saxena 	struct ib_mw *ib_mw = NULL;
601acd884deSSumit Saxena 	dma_addr_t dma_addr = 0;
602acd884deSSumit Saxena 	u32 max_mr_count;
603acd884deSSumit Saxena 	u64 pbl_tbl;
604acd884deSSumit Saxena 	int rc;
605acd884deSSumit Saxena 
606acd884deSSumit Saxena 	if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
607acd884deSSumit Saxena 		return 0;
608acd884deSSumit Saxena 
609acd884deSSumit Saxena 	memset(&mrinfo, 0, sizeof(mrinfo));
610acd884deSSumit Saxena 	/* Allocate a small chunk of memory and dma-map it */
611acd884deSSumit Saxena 	fence->va = kzalloc(BNXT_RE_LEGACY_FENCE_BYTES, GFP_KERNEL);
612acd884deSSumit Saxena 	if (!fence->va)
613acd884deSSumit Saxena 		return -ENOMEM;
614acd884deSSumit Saxena 	dma_addr = ib_dma_map_single(&rdev->ibdev, fence->va,
615acd884deSSumit Saxena 				     BNXT_RE_LEGACY_FENCE_BYTES,
616acd884deSSumit Saxena 				     DMA_BIDIRECTIONAL);
617acd884deSSumit Saxena 	rc = ib_dma_mapping_error(&rdev->ibdev, dma_addr);
618acd884deSSumit Saxena 	if (rc) {
619acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
620acd884deSSumit Saxena 		rc = -EIO;
621acd884deSSumit Saxena 		fence->dma_addr = 0;
622acd884deSSumit Saxena 		goto free_va;
623acd884deSSumit Saxena 	}
624acd884deSSumit Saxena 	fence->dma_addr = dma_addr;
625acd884deSSumit Saxena 
626acd884deSSumit Saxena 	/* Allocate a MR */
627acd884deSSumit Saxena 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
628acd884deSSumit Saxena 	if (!mr)
629acd884deSSumit Saxena 		goto free_dma_addr;
630acd884deSSumit Saxena 	fence->mr = mr;
631acd884deSSumit Saxena 	mr->rdev = rdev;
632acd884deSSumit Saxena 	mr->qplib_mr.pd = &pd->qplib_pd;
633acd884deSSumit Saxena 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
634acd884deSSumit Saxena 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
635acd884deSSumit Saxena 	if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) {
636acd884deSSumit Saxena 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
637acd884deSSumit Saxena 		if (rc) {
638acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
639acd884deSSumit Saxena 			goto free_mr;
640acd884deSSumit Saxena 		}
641acd884deSSumit Saxena 		/* Register MR */
642acd884deSSumit Saxena 		mr->ib_mr.lkey = mr->qplib_mr.lkey;
643acd884deSSumit Saxena 	}
644acd884deSSumit Saxena 	mr->qplib_mr.va         = (u64)fence->va;
645acd884deSSumit Saxena 	mr->qplib_mr.total_size = BNXT_RE_LEGACY_FENCE_BYTES;
646acd884deSSumit Saxena 	pbl_tbl = dma_addr;
647acd884deSSumit Saxena 
648acd884deSSumit Saxena 	mrinfo.mrw = &mr->qplib_mr;
649acd884deSSumit Saxena 	mrinfo.ptes = &pbl_tbl;
650acd884deSSumit Saxena 	mrinfo.sg.npages = BNXT_RE_LEGACY_FENCE_PBL_SIZE;
651acd884deSSumit Saxena 
652acd884deSSumit Saxena 	mrinfo.sg.nmap = 0;
653acd884deSSumit Saxena 	mrinfo.sg.sghead = 0;
654acd884deSSumit Saxena 	mrinfo.sg.pgshft = PAGE_SHIFT;
655acd884deSSumit Saxena 	mrinfo.sg.pgsize = PAGE_SIZE;
656acd884deSSumit Saxena 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
657acd884deSSumit Saxena 	if (rc) {
658acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
659acd884deSSumit Saxena 		goto free_mr;
660acd884deSSumit Saxena 	}
661acd884deSSumit Saxena 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
662acd884deSSumit Saxena 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
663acd884deSSumit Saxena 	atomic_inc(&rdev->stats.rsors.mr_count);
664acd884deSSumit Saxena 	max_mr_count =  atomic_read(&rdev->stats.rsors.mr_count);
665acd884deSSumit Saxena 	if (max_mr_count > (atomic_read(&rdev->stats.rsors.max_mr_count)))
666acd884deSSumit Saxena 		atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
667acd884deSSumit Saxena 
668acd884deSSumit Saxena 	ib_mw = bnxt_re_alloc_mw(&pd->ibpd, IB_MW_TYPE_1, NULL);
669acd884deSSumit Saxena 	/* Create a fence MW only for kernel consumers */
670acd884deSSumit Saxena 	if (!ib_mw) {
671acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev),
672acd884deSSumit Saxena 			"Failed to create fence-MW for PD: %p\n", pd);
673acd884deSSumit Saxena 		rc = -EINVAL;
674acd884deSSumit Saxena 		goto free_mr;
675acd884deSSumit Saxena 	}
676acd884deSSumit Saxena 	fence->mw = ib_mw;
677acd884deSSumit Saxena 
678acd884deSSumit Saxena 	bnxt_re_legacy_create_fence_wqe(pd);
679acd884deSSumit Saxena 	return 0;
680acd884deSSumit Saxena 
681acd884deSSumit Saxena free_mr:
682acd884deSSumit Saxena 	if (mr->ib_mr.lkey) {
683acd884deSSumit Saxena 		bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
684acd884deSSumit Saxena 		atomic_dec(&rdev->stats.rsors.mr_count);
685acd884deSSumit Saxena 	}
686acd884deSSumit Saxena 	kfree(mr);
687acd884deSSumit Saxena 	fence->mr = NULL;
688acd884deSSumit Saxena 
689acd884deSSumit Saxena free_dma_addr:
690acd884deSSumit Saxena 	ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr,
691acd884deSSumit Saxena 			    BNXT_RE_LEGACY_FENCE_BYTES, DMA_BIDIRECTIONAL);
692acd884deSSumit Saxena 	fence->dma_addr = 0;
693acd884deSSumit Saxena 
694acd884deSSumit Saxena free_va:
695acd884deSSumit Saxena 	kfree(fence->va);
696acd884deSSumit Saxena 	fence->va = NULL;
697acd884deSSumit Saxena 	return rc;
698acd884deSSumit Saxena }
699acd884deSSumit Saxena 
/*
 * bnxt_re_legacy_destroy_fence_mr - Tear down the per-PD fence resources.
 * @pd: protection domain being destroyed
 *
 * Releases, in order, the fence MW, the fence MR (dereg then free), the
 * DMA mapping and finally the backing buffer. Safe to call on a PD whose
 * fence setup partially failed: each step is guarded by its own check.
 * No-op unless the chip generation uses the legacy fence.
 */
static void bnxt_re_legacy_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_legacy_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr = fence->mr;

	if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
		return;

	/* The MW references the MR, so it must go first. */
	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		/* rkey set => MR was registered; lkey set => HW MRW exists. */
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     false);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
		atomic_dec(&rdev->stats.rsors.mr_count);
	}
	/* Unmap before freeing the buffer the mapping points at. */
	if (fence->dma_addr) {
		ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr,
				    BNXT_RE_LEGACY_FENCE_BYTES,
				    DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
	kfree(fence->va);
	fence->va = NULL;
}
732acd884deSSumit Saxena 
733acd884deSSumit Saxena 
bnxt_re_get_user_dpi(struct bnxt_re_dev * rdev,struct bnxt_re_ucontext * cntx)734acd884deSSumit Saxena static int bnxt_re_get_user_dpi(struct bnxt_re_dev *rdev,
735acd884deSSumit Saxena 				struct bnxt_re_ucontext *cntx)
736acd884deSSumit Saxena {
737acd884deSSumit Saxena 	struct bnxt_qplib_chip_ctx *cctx = rdev->chip_ctx;
738acd884deSSumit Saxena 	int ret = 0;
739acd884deSSumit Saxena 	u8 type;
740acd884deSSumit Saxena 	/* Allocate DPI in alloc_pd or in create_cq to avoid failing of
741acd884deSSumit Saxena 	 * ibv_devinfo and family of application when DPIs are depleted.
742acd884deSSumit Saxena 	 */
743acd884deSSumit Saxena 	type = BNXT_QPLIB_DPI_TYPE_UC;
744acd884deSSumit Saxena 	ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->dpi, cntx, type);
745acd884deSSumit Saxena 	if (ret) {
746acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Alloc doorbell page failed!\n");
747acd884deSSumit Saxena 		goto out;
748acd884deSSumit Saxena 	}
749acd884deSSumit Saxena 
750acd884deSSumit Saxena 	if (cctx->modes.db_push) {
751acd884deSSumit Saxena 		type = BNXT_QPLIB_DPI_TYPE_WC;
752acd884deSSumit Saxena 		ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->wcdpi,
753acd884deSSumit Saxena 					   cntx, type);
754acd884deSSumit Saxena 		if (ret)
755acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev), "push dp alloc failed\n");
756acd884deSSumit Saxena 	}
757acd884deSSumit Saxena out:
758acd884deSSumit Saxena 	return ret;
759acd884deSSumit Saxena }
760acd884deSSumit Saxena 
761acd884deSSumit Saxena /* Protection Domains */
bnxt_re_dealloc_pd(struct ib_pd * ib_pd,struct ib_udata * udata)762acd884deSSumit Saxena void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
763acd884deSSumit Saxena {
764acd884deSSumit Saxena 	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
765acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = pd->rdev;
766acd884deSSumit Saxena 	int rc;
767acd884deSSumit Saxena 
768acd884deSSumit Saxena 	bnxt_re_legacy_destroy_fence_mr(pd);
769acd884deSSumit Saxena 
770acd884deSSumit Saxena 	rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
771acd884deSSumit Saxena 				   &rdev->qplib_res.pd_tbl,
772acd884deSSumit Saxena 				   &pd->qplib_pd);
773acd884deSSumit Saxena 	if (rc)
774acd884deSSumit Saxena 		dev_err_ratelimited(rdev_to_dev(rdev),
775acd884deSSumit Saxena 				    "%s failed rc = %d\n", __func__, rc);
776acd884deSSumit Saxena 	atomic_dec(&rdev->stats.rsors.pd_count);
777acd884deSSumit Saxena 
778acd884deSSumit Saxena 	return;
779acd884deSSumit Saxena }
780acd884deSSumit Saxena 
/*
 * bnxt_re_alloc_pd - Allocate a protection domain.
 * @pd_in: pre-allocated ib_pd embedded in the driver PD
 * @udata: user data; non-NULL for userspace consumers
 *
 * Allocates the HW PD, and for user consumers additionally ensures the
 * context has a doorbell page and copies the PD/doorbell details back to
 * userspace. Kernel consumers get the legacy fence MR instead (failure
 * there is non-fatal and only logged).
 *
 * Returns 0 on success or a negative errno.
 */
int bnxt_re_alloc_pd(struct ib_pd *pd_in,
		     struct ib_udata *udata)
{
	struct ib_pd *ibpd = pd_in;
	struct ib_device *ibdev = ibpd->device;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx =
		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
					  ibucontext);
	u32 max_pd_count;
	int rc;
	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ibpd);

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
		dev_err(rdev_to_dev(rdev),
			"Allocate HW Protection Domain failed!\n");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp = {};

		/* First PD on this context allocates the doorbell page. */
		if (!ucntx->dpi.dbr) {
			rc = bnxt_re_get_user_dpi(rdev, ucntx);
			if (rc)
				goto dbfail;
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;
		resp.dbr = (u64)ucntx->dpi.umdbr;
		/* Copy only on a valid wcpdi */
		if (ucntx->wcdpi.dpi) {
			resp.wcdpi = ucntx->wcdpi.dpi;
			resp.comp_mask = BNXT_RE_COMP_MASK_PD_HAS_WC_DPI;
		}
		if (rdev->dbr_pacing) {
			WARN_ON(!rdev->dbr_bar_addr);
			resp.dbr_bar_addr = (u64)rdev->dbr_bar_addr;
			resp.comp_mask |= BNXT_RE_COMP_MASK_PD_HAS_DBR_BAR_ADDR;
		}

		/* min() keeps compatibility with older userspace libraries
		 * that declared a smaller response struct.
		 */
		rc = bnxt_re_copy_to_udata(rdev, &resp,
					   min(udata->outlen, sizeof(resp)),
					   udata);
		if (rc)
			goto dbfail;
	}

	/* Kernel consumers only: fence-MR failure is deliberately non-fatal. */
	if (!udata)
		if (bnxt_re_legacy_create_fence_mr(pd))
			dev_warn(rdev_to_dev(rdev),
				 "Failed to create Fence-MR\n");

	atomic_inc(&rdev->stats.rsors.pd_count);
	max_pd_count = atomic_read(&rdev->stats.rsors.pd_count);
	if (max_pd_count > atomic_read(&rdev->stats.rsors.max_pd_count))
		atomic_set(&rdev->stats.rsors.max_pd_count, max_pd_count);

	return 0;
dbfail:
	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
				    &pd->qplib_pd);
fail:
	return rc;
}
850acd884deSSumit Saxena 
851acd884deSSumit Saxena /* Address Handles */
/*
 * bnxt_re_destroy_ah - Destroy an address handle.
 * @ib_ah: address handle being destroyed
 * @flags: RDMA_DESTROY_AH_* flags; SLEEPABLE selects non-blocking FW call
 *
 * Destroys the HW AH (blocking unless the caller says sleeping is
 * allowed); failure is only logged, and the AH count is decremented
 * unconditionally.
 */
void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
	struct bnxt_re_ah *ah = to_bnxt_re(ib_ah, struct bnxt_re_ah, ibah);
	struct bnxt_re_dev *rdev = ah->rdev;
	bool block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
	int rc;

	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
	if (rc)
		dev_err_ratelimited(rdev_to_dev(rdev),
				   "%s id = %d blocking %d failed rc = %d\n",
				    __func__, ah->qplib_ah.id, block, rc);

	atomic_dec(&rdev->stats.rsors.ah_count);
}
870acd884deSSumit Saxena 
_to_bnxt_re_nw_type(enum rdma_network_type ntype)871acd884deSSumit Saxena static u8 _to_bnxt_re_nw_type(enum rdma_network_type ntype)
872acd884deSSumit Saxena {
873acd884deSSumit Saxena 	u8 nw_type;
874acd884deSSumit Saxena 	switch (ntype) {
875acd884deSSumit Saxena 		case RDMA_NETWORK_IPV4:
876acd884deSSumit Saxena 			nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
877acd884deSSumit Saxena 			break;
878acd884deSSumit Saxena 		case RDMA_NETWORK_IPV6:
879acd884deSSumit Saxena 			nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
880acd884deSSumit Saxena 			break;
881acd884deSSumit Saxena 		default:
882acd884deSSumit Saxena 			nw_type = CMDQ_CREATE_AH_TYPE_V1;
883acd884deSSumit Saxena 			break;
884acd884deSSumit Saxena 	}
885acd884deSSumit Saxena 	return nw_type;
886acd884deSSumit Saxena }
887acd884deSSumit Saxena 
888acd884deSSumit Saxena static inline int
bnxt_re_get_cached_gid(struct ib_device * dev,u8 port_num,int index,union ib_gid * sgid,struct ib_gid_attr ** sgid_attr,struct ib_global_route * grh,struct ib_ah * ah)889acd884deSSumit Saxena bnxt_re_get_cached_gid(struct ib_device *dev, u8 port_num, int index,
890acd884deSSumit Saxena 		       union ib_gid *sgid, struct ib_gid_attr **sgid_attr,
891acd884deSSumit Saxena 		       struct ib_global_route *grh, struct ib_ah *ah)
892acd884deSSumit Saxena {
893acd884deSSumit Saxena 	int ret = 0;
894acd884deSSumit Saxena 
895acd884deSSumit Saxena 	ret = ib_get_cached_gid(dev, port_num, index, sgid, *sgid_attr);
896acd884deSSumit Saxena 	return ret;
897acd884deSSumit Saxena }
898acd884deSSumit Saxena 
/*
 * bnxt_re_gid_to_network_type - Derive the network (header) type for a GID.
 *
 * Compat wrapper around the ib core helper; kept so callers are uniform
 * across OS ports.
 */
static inline enum rdma_network_type
bnxt_re_gid_to_network_type(struct ib_gid_attr *sgid_attr,
			    union ib_gid *sgid)
{
	return ib_gid_to_network_type(sgid_attr->gid_type, sgid);
}
905acd884deSSumit Saxena 
bnxt_re_get_ah_info(struct bnxt_re_dev * rdev,struct ib_ah_attr * ah_attr,struct bnxt_re_ah_info * ah_info)906acd884deSSumit Saxena static int bnxt_re_get_ah_info(struct bnxt_re_dev *rdev,
907acd884deSSumit Saxena 			       struct ib_ah_attr *ah_attr,
908acd884deSSumit Saxena 			       struct bnxt_re_ah_info *ah_info)
909acd884deSSumit Saxena {
910acd884deSSumit Saxena 	struct ib_gid_attr *gattr;
911acd884deSSumit Saxena 	enum rdma_network_type ib_ntype;
912acd884deSSumit Saxena 	u8 ntype;
913acd884deSSumit Saxena 	union ib_gid *gid;
914acd884deSSumit Saxena 	int rc = 0;
915acd884deSSumit Saxena 
916acd884deSSumit Saxena 	gid = &ah_info->sgid;
917acd884deSSumit Saxena 	gattr = &ah_info->sgid_attr;
918acd884deSSumit Saxena 
919acd884deSSumit Saxena 	rc = bnxt_re_get_cached_gid(&rdev->ibdev, 1, ah_attr->grh.sgid_index,
920acd884deSSumit Saxena 				    gid, &gattr, &ah_attr->grh, NULL);
921acd884deSSumit Saxena 	if (rc)
922acd884deSSumit Saxena 		return rc;
923acd884deSSumit Saxena 
924acd884deSSumit Saxena 	/* Get vlan tag */
925acd884deSSumit Saxena 	if (gattr->ndev) {
926acd884deSSumit Saxena 		if (is_vlan_dev(gattr->ndev))
927acd884deSSumit Saxena 			ah_info->vlan_tag = vlan_dev_vlan_id(gattr->ndev);
928acd884deSSumit Saxena 		if_rele(gattr->ndev);
929acd884deSSumit Saxena 	}
930acd884deSSumit Saxena 
931acd884deSSumit Saxena 	/* Get network header type for this GID */
932acd884deSSumit Saxena 
933acd884deSSumit Saxena 	ib_ntype = bnxt_re_gid_to_network_type(gattr, gid);
934acd884deSSumit Saxena 	ntype = _to_bnxt_re_nw_type(ib_ntype);
935acd884deSSumit Saxena 	ah_info->nw_type = ntype;
936acd884deSSumit Saxena 
937acd884deSSumit Saxena 	return rc;
938acd884deSSumit Saxena }
939acd884deSSumit Saxena 
/*
 * _get_sgid_index - Translate a stack GID index into the HW table index.
 *
 * The mapping table yields 0xFF for slots with no HW entry; callers
 * check for that sentinel.
 */
static u8 _get_sgid_index(struct bnxt_re_dev *rdev, u8 gindx)
{
	return rdev->gid_map[gindx];
}
945acd884deSSumit Saxena 
/*
 * bnxt_re_init_dmac - Resolve and install the destination MAC for an AH.
 * @rdev:    device the AH is created on
 * @ah_attr: AH attributes; the resolved DMAC lands in ROCE_DMAC(ah_attr)
 * @ah_info: resolved GID info for the AH
 * @is_user: true for userspace-created AHs (these need ARP/ND resolution)
 * @ah:      driver AH receiving the DMAC
 *
 * For user AHs with a routable (non-multicast, non-link-local) DGID,
 * queues a resolver work item and busy-waits up to
 * BNXT_RE_RESOLVE_RETRY_COUNT_US microseconds for it to complete; on
 * timeout the work item is parked on rdev->mac_wq_list for deferred
 * cleanup (it may still be running) and -EFAULT is returned. On success
 * the resolved DMAC is copied into the qplib AH.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_re_init_dmac(struct bnxt_re_dev *rdev, struct ib_ah_attr *ah_attr,
			     struct bnxt_re_ah_info *ah_info, bool is_user,
			     struct bnxt_re_ah *ah)
{
	int rc = 0;
	u8 *dmac;

	if (is_user && !rdma_is_multicast_addr((struct in6_addr *)
						ah_attr->grh.dgid.raw) &&
	    !rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {

		u32 retry_count = BNXT_RE_RESOLVE_RETRY_COUNT_US;
		struct bnxt_re_resolve_dmac_work *resolve_dmac_work;


		resolve_dmac_work = kzalloc(sizeof(*resolve_dmac_work), GFP_ATOMIC);
		/* Bug fix: the allocation was previously dereferenced
		 * without a NULL check, crashing under memory pressure.
		 */
		if (!resolve_dmac_work)
			return -ENOMEM;

		resolve_dmac_work->rdev = rdev;
		resolve_dmac_work->ah_attr = ah_attr;
		resolve_dmac_work->ah_info = ah_info;

		atomic_set(&resolve_dmac_work->status_wait, 1);
		INIT_WORK(&resolve_dmac_work->work, bnxt_re_resolve_dmac_task);
		queue_work(rdev->resolve_wq, &resolve_dmac_work->work);

		/* Poll until the worker clears the low status byte. */
		do {
			rc = atomic_read(&resolve_dmac_work->status_wait) & 0xFF;
			if (!rc)
				break;
			udelay(1);
		} while (--retry_count);
		if (atomic_read(&resolve_dmac_work->status_wait)) {
			/* Timed out: the work may still run, so park it for
			 * deferred cleanup instead of freeing it here.
			 */
			INIT_LIST_HEAD(&resolve_dmac_work->list);
			list_add_tail(&resolve_dmac_work->list,
					&rdev->mac_wq_list);
			return -EFAULT;
		}
		kfree(resolve_dmac_work);
	}
	dmac = ROCE_DMAC(ah_attr);
	if (dmac)
		memcpy(ah->qplib_ah.dmac, dmac, ETH_ALEN);
	return rc;
}
990acd884deSSumit Saxena 
/*
 * bnxt_re_create_ah - Create an address handle.
 * @ah_in: pre-allocated ib_ah embedded in the driver AH
 * @attr:  caller-supplied AH attributes
 * @flags: RDMA_CREATE_AH_* flags; SLEEPABLE selects non-blocking FW call
 * @udata: user data (unused directly; user-ness comes from pd->uobject)
 *
 * Fills the qplib AH from the attributes (GID, traffic class, flow
 * label, hop limit, SL), resolves the source GID and destination MAC,
 * creates the HW AH and, for user consumers, publishes the AH id (AVID)
 * through the shared page. Returns 0 on success or a negative errno.
 */
int bnxt_re_create_ah(struct ib_ah *ah_in, struct ib_ah_attr *attr,
		      u32 flags, struct ib_udata *udata)
{

	struct ib_ah *ib_ah = ah_in;
	struct ib_pd *ib_pd = ib_ah->pd;
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ibah);
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ibpd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah_info ah_info;
	u32 max_ah_count;
	bool is_user;
	int rc;
	bool block = true;
	struct ib_ah_attr *ah_attr = attr;
	block = !(flags & RDMA_CREATE_AH_SLEEPABLE);

	/* GRH is mandatory on this device; log but continue if missing. */
	if (!(ah_attr->ah_flags & IB_AH_GRH))
		dev_err(rdev_to_dev(rdev), "ah_attr->ah_flags GRH is not set\n");

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;
	is_user = ib_pd->uobject ? true : false;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, ah_attr->grh.dgid.raw,
			sizeof(union ib_gid));
	/* 0xFF is the "no HW entry" sentinel from the gid_map table. */
	ah->qplib_ah.sgid_index = _get_sgid_index(rdev, ah_attr->grh.sgid_index);
	if (ah->qplib_ah.sgid_index == 0xFF) {
		dev_err(rdev_to_dev(rdev), "invalid sgid_index!\n");
		rc = -EINVAL;
		goto fail;
	}
	ah->qplib_ah.host_sgid_index = ah_attr->grh.sgid_index;
	ah->qplib_ah.traffic_class = ah_attr->grh.traffic_class;
	ah->qplib_ah.flow_label = ah_attr->grh.flow_label;
	ah->qplib_ah.hop_limit = ah_attr->grh.hop_limit;
	ah->qplib_ah.sl = ah_attr->sl;
	rc = bnxt_re_get_ah_info(rdev, ah_attr, &ah_info);
	if (rc)
		goto fail;
	ah->qplib_ah.nw_type = ah_info.nw_type;

	rc = bnxt_re_init_dmac(rdev, ah_attr, &ah_info, is_user, ah);
	if (rc)
		goto fail;

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, block);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Allocate HW Address Handle failed!\n");
		goto fail;
	}

	/* Write AVID to shared page. */
	if (ib_pd->uobject) {
		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
		struct bnxt_re_ucontext *uctx;
		unsigned long flag;
		u32 *wrptr;

		/* sh_lock serializes writers of the shared page. */
		uctx = to_bnxt_re(ib_uctx, struct bnxt_re_ucontext, ibucontext);
		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)((u8 *)uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb(); /* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}
	atomic_inc(&rdev->stats.rsors.ah_count);
	max_ah_count = atomic_read(&rdev->stats.rsors.ah_count);
	if (max_ah_count > atomic_read(&rdev->stats.rsors.max_ah_count))
		atomic_set(&rdev->stats.rsors.max_ah_count, max_ah_count);

	return 0;
fail:
	return rc;
}
1068acd884deSSumit Saxena 
/* AH modification is not supported by this driver; report success
 * without touching the handle.
 */
int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr)
{
	return 0;
}
1073acd884deSSumit Saxena 
bnxt_re_query_ah(struct ib_ah * ib_ah,struct ib_ah_attr * ah_attr)1074acd884deSSumit Saxena int bnxt_re_query_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr)
1075acd884deSSumit Saxena {
1076acd884deSSumit Saxena 	struct bnxt_re_ah *ah = to_bnxt_re(ib_ah, struct bnxt_re_ah, ibah);
1077acd884deSSumit Saxena 
1078acd884deSSumit Saxena 	memcpy(ah_attr->grh.dgid.raw, ah->qplib_ah.dgid.data,
1079acd884deSSumit Saxena 	       sizeof(union ib_gid));
1080acd884deSSumit Saxena 	ah_attr->grh.sgid_index = ah->qplib_ah.host_sgid_index;
1081acd884deSSumit Saxena 	ah_attr->grh.traffic_class = ah->qplib_ah.traffic_class;
1082acd884deSSumit Saxena 	ah_attr->sl = ah->qplib_ah.sl;
1083acd884deSSumit Saxena 	memcpy(ROCE_DMAC(ah_attr), ah->qplib_ah.dmac, ETH_ALEN);
1084acd884deSSumit Saxena 	ah_attr->ah_flags = IB_AH_GRH;
1085acd884deSSumit Saxena 	ah_attr->port_num = 1;
1086acd884deSSumit Saxena 	ah_attr->static_rate = 0;
1087acd884deSSumit Saxena 
1088acd884deSSumit Saxena 	return 0;
1089acd884deSSumit Saxena }
1090acd884deSSumit Saxena 
1091acd884deSSumit Saxena /* Shared Receive Queues */
bnxt_re_destroy_srq(struct ib_srq * ib_srq,struct ib_udata * udata)1092acd884deSSumit Saxena void bnxt_re_destroy_srq(struct ib_srq *ib_srq,
1093acd884deSSumit Saxena 			 struct ib_udata *udata)
1094acd884deSSumit Saxena {
1095acd884deSSumit Saxena 	struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq, ibsrq);
1096acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = srq->rdev;
1097acd884deSSumit Saxena 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1098acd884deSSumit Saxena 	int rc = 0;
1099acd884deSSumit Saxena 
1100acd884deSSumit Saxena 
1101acd884deSSumit Saxena 	rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1102acd884deSSumit Saxena 	if (rc)
1103acd884deSSumit Saxena 		dev_err_ratelimited(rdev_to_dev(rdev),
1104acd884deSSumit Saxena 				   "%s id = %d failed rc = %d\n",
1105acd884deSSumit Saxena 				    __func__, qplib_srq->id, rc);
1106acd884deSSumit Saxena 
1107acd884deSSumit Saxena 	if (srq->umem && !IS_ERR(srq->umem))
1108acd884deSSumit Saxena 		ib_umem_release(srq->umem);
1109acd884deSSumit Saxena 
1110acd884deSSumit Saxena 	atomic_dec(&rdev->stats.rsors.srq_count);
1111acd884deSSumit Saxena 
1112acd884deSSumit Saxena 	return;
1113acd884deSSumit Saxena }
1114acd884deSSumit Saxena 
_max_rwqe_sz(int nsge)1115acd884deSSumit Saxena static u16 _max_rwqe_sz(int nsge)
1116acd884deSSumit Saxena {
1117acd884deSSumit Saxena 	return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
1118acd884deSSumit Saxena }
1119acd884deSSumit Saxena 
bnxt_re_get_rwqe_size(struct bnxt_qplib_qp * qplqp,int rsge,int max)1120acd884deSSumit Saxena static u16 bnxt_re_get_rwqe_size(struct bnxt_qplib_qp *qplqp,
1121acd884deSSumit Saxena 				 int rsge, int max)
1122acd884deSSumit Saxena {
1123acd884deSSumit Saxena 	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1124acd884deSSumit Saxena 		rsge = max;
1125acd884deSSumit Saxena 
1126acd884deSSumit Saxena 	return _max_rwqe_sz(rsge);
1127acd884deSSumit Saxena }
1128acd884deSSumit Saxena 
/*
 * ib_umem_get_compat - Pin user memory for HW access.
 *
 * Compat shim: rdev and udata are accepted for source compatibility
 * with other OS ports but are unused on this OS.
 */
static inline
struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev,
				   struct ib_ucontext *ucontext,
				   struct ib_udata *udata,
				   unsigned long addr,
				   size_t size, int access, int dmasync)
{
	return ib_umem_get(ucontext, addr, size, access, dmasync);
}
1138acd884deSSumit Saxena 
/*
 * ib_umem_get_flags_compat - Pin user memory, flags-style entry point.
 *
 * NOTE(review): the caller's dmasync argument is dropped and 0 is passed
 * instead — presumably deliberate on this OS, but worth confirming.
 */
static inline
struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev,
					 struct ib_ucontext *ucontext,
					 struct ib_udata *udata,
					 unsigned long addr,
					 size_t size, int access, int dmasync)
{
	return ib_umem_get_compat(rdev, ucontext, udata, addr, size,
				  access, 0);
}
1149acd884deSSumit Saxena 
/* Compat shim: number of pages spanned by a pinned umem region. */
static inline size_t ib_umem_num_pages_compat(struct ib_umem *umem)
{
	return ib_umem_num_pages(umem);
}
1154acd884deSSumit Saxena 
/* bnxt_re_init_user_srq - map user memory backing a user-mode SRQ.
 * @rdev:  bnxt_re device
 * @pd:    protection domain that owns the SRQ
 * @srq:   driver SRQ under creation (qplib_srq already sized by caller)
 * @udata: user ABI blob carrying struct bnxt_re_srq_req
 *
 * Copies the user request, pins the user ring buffer at ureq.srqva
 * (max_wqe * wqe_size bytes, page aligned) and points the qplib SRQ
 * sg-info at the pinned pages so the HW ring is built over user memory.
 * On success the umem is stored in srq->umem for release on teardown.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
				 struct bnxt_re_pd *pd,
				 struct bnxt_re_srq *srq,
				 struct ib_udata *udata)
{
	struct bnxt_qplib_sg_info *sginfo;
	struct bnxt_qplib_srq *qplib_srq;
	struct bnxt_re_ucontext *cntx;
	struct ib_ucontext *context;
	struct bnxt_re_srq_req ureq;
	struct ib_umem *umem;
	int rc, bytes = 0;

	context = pd->ibpd.uobject->context;
	cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
	qplib_srq = &srq->qplib_srq;
	sginfo = &qplib_srq->sginfo;

	/* Older userspace may hand in a shorter request structure; warn
	 * but proceed with the common prefix below.
	 */
	if (udata->inlen < sizeof(ureq))
		dev_warn(rdev_to_dev(rdev),
			 "Update the library ulen %d klen %d\n",
			 (unsigned int)udata->inlen,
			 (unsigned int)sizeof(ureq));

	rc = ib_copy_from_udata(&ureq, udata,
				min(udata->inlen, sizeof(ureq)));
	if (rc)
		return rc;

	/* Pin the whole (page-aligned) SRQ ring from user space. */
	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get_compat(rdev, context, udata, ureq.srqva, bytes,
				  IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n",
			__func__, PTR_ERR(umem));
		return PTR_ERR(umem);
	}

	srq->umem = umem;
	sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
	sginfo->npages = ib_umem_num_pages_compat(umem);
	qplib_srq->srq_handle = ureq.srq_handle;
	qplib_srq->dpi = &cntx->dpi;	/* user doorbell page */
	qplib_srq->is_user = true;

	return 0;
}
1203acd884deSSumit Saxena 
/* bnxt_re_create_srq - verbs entry point to create a Shared Receive Queue.
 * @srq_in:        pre-allocated ib_srq (embedded in struct bnxt_re_srq)
 * @srq_init_attr: requested SRQ attributes (only IB_SRQT_BASIC supported)
 * @udata:         user ABI blob; non-NULL for user-mode SRQs
 *
 * Validates device limits, sizes the qplib SRQ, maps user memory when
 * requested, creates the HW SRQ and reports the SRQ id back to
 * userspace.  Returns 0 on success or a negative errno.
 */
int bnxt_re_create_srq(struct ib_srq *srq_in, struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_re_ucontext *cntx = NULL;
	struct ib_ucontext *context;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_pd *pd;
	int rc, entries;
	struct ib_srq *ib_srq = srq_in;
	struct ib_pd *ib_pd = ib_srq->pd;
	struct bnxt_re_srq *srq =
		container_of(ib_srq, struct bnxt_re_srq, ibsrq);
	u32 max_srq_count;

	pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
	rdev = pd->rdev;
	dev_attr = rdev->dev_attr;

	/* Bail out early while the driver is unloading. */
	if (rdev->mod_exit) {
		dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
		rc = -EIO;
		goto exit;
	}

	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
		dev_err(rdev_to_dev(rdev), "SRQ type not supported\n");
		rc = -ENOTSUPP;
		goto exit;
	}

	if (udata) {
		context = pd->ibpd.uobject->context;
		cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
	}

	/* Enforce device-wide SRQ count and per-SRQ depth limits. */
	if (atomic_read(&rdev->stats.rsors.srq_count) >= dev_attr->max_srq) {
		dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQs)\n");
		rc = -EINVAL;
		goto exit;
	}

	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
		dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQ_WQs)\n");
		rc = -EINVAL;
		goto exit;
	}

	srq->rdev = rdev;
	srq->qplib_srq.pd = &pd->qplib_pd;
	/* Kernel SRQs use the privileged doorbell page; overridden for
	 * user SRQs in bnxt_re_init_user_srq().
	 */
	srq->qplib_srq.dpi = &rdev->dpi_privileged;

	/* Allocate 1 more than what's provided so posting max doesn't
	   mean empty */
	entries = srq_init_attr->attr.max_wr + 1;
	entries = bnxt_re_init_depth(entries, cntx);
	if (entries > dev_attr->max_srq_wqes + 1)
		entries = dev_attr->max_srq_wqes + 1;

	srq->qplib_srq.wqe_size = _max_rwqe_sz(6); /* 128 byte wqe size */
	srq->qplib_srq.max_wqe = entries;
	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
	srq->srq_limit = srq_init_attr->attr.srq_limit;
	srq->qplib_srq.eventq_hw_ring_id = rdev->nqr.nq[0].ring_id;
	srq->qplib_srq.sginfo.pgsize = PAGE_SIZE;
	srq->qplib_srq.sginfo.pgshft = PAGE_SHIFT;

	if (udata) {
		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
		if (rc)
			goto fail;
	}

	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!\n");
		goto fail;
	}

	/* Return the HW SRQ id to userspace; destroy the HW object if
	 * the response copy-out fails.
	 */
	if (udata) {
		struct bnxt_re_srq_resp resp;

		resp.srqid = srq->qplib_srq.id;
		rc = bnxt_re_copy_to_udata(rdev, &resp,
					   min(udata->outlen, sizeof(resp)),
					   udata);
		if (rc) {
			bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq);
			goto fail;
		}
	}
	atomic_inc(&rdev->stats.rsors.srq_count);
	max_srq_count = atomic_read(&rdev->stats.rsors.srq_count);
	if (max_srq_count > atomic_read(&rdev->stats.rsors.max_srq_count))
		atomic_set(&rdev->stats.rsors.max_srq_count, max_srq_count);
	spin_lock_init(&srq->lock);

	return 0;
fail:
	/* Unwind the pinned user memory from bnxt_re_init_user_srq(). */
	if (udata && srq->umem && !IS_ERR(srq->umem)) {
		ib_umem_release(srq->umem);
		srq->umem = NULL;
	}
exit:
	return rc;
}
1311acd884deSSumit Saxena 
/* bnxt_re_modify_srq - verbs entry point to modify an SRQ.
 *
 * Only the SRQ limit (arming threshold) can actually be changed;
 * resize (IB_SRQ_MAX_WR) is accepted but ignored.
 *
 * NOTE(review): srq_attr_mask is a bitmask but is dispatched with a
 * switch, so a combined mask (MAX_WR | LIMIT) falls through to the
 * default -EINVAL arm — presumably callers only set one bit at a time;
 * confirm against the verbs consumers.
 *
 * Returns 0 on success or a negative errno.
 */
int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq,
					     ibsrq);
	struct bnxt_re_dev *rdev = srq->rdev;
	int rc;

	switch (srq_attr_mask) {
	case IB_SRQ_MAX_WR:
		/* SRQ resize is not supported */
		break;
	case IB_SRQ_LIMIT:
		/* Change the SRQ threshold */
		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
			return -EINVAL;

		srq->qplib_srq.threshold = srq_attr->srq_limit;
		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!\n");
			return rc;
		}
		/* On success, update the shadow */
		srq->srq_limit = srq_attr->srq_limit;

		if (udata) {
			/* Build and send response back to udata */
			rc = bnxt_re_copy_to_udata(rdev, srq, 0, udata);
			if (rc)
				return rc;
		}
		break;
	default:
		dev_err(rdev_to_dev(rdev),
			"Unsupported srq_attr_mask 0x%x\n", srq_attr_mask);
		return -EINVAL;
	}
	return 0;
}
1353acd884deSSumit Saxena 
bnxt_re_query_srq(struct ib_srq * ib_srq,struct ib_srq_attr * srq_attr)1354acd884deSSumit Saxena int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1355acd884deSSumit Saxena {
1356acd884deSSumit Saxena 	struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq,
1357acd884deSSumit Saxena 					     ibsrq);
1358acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = srq->rdev;
1359acd884deSSumit Saxena 	int rc;
1360acd884deSSumit Saxena 
1361acd884deSSumit Saxena 	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &srq->qplib_srq);
1362acd884deSSumit Saxena 	if (rc) {
1363acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Query HW SRQ (0x%x) failed! rc = %d\n",
1364acd884deSSumit Saxena 			srq->qplib_srq.id, rc);
1365acd884deSSumit Saxena 		return rc;
1366acd884deSSumit Saxena 	}
1367acd884deSSumit Saxena 	srq_attr->max_wr = srq->qplib_srq.max_wqe;
1368acd884deSSumit Saxena 	srq_attr->max_sge = srq->qplib_srq.max_sge;
1369acd884deSSumit Saxena 	srq_attr->srq_limit = srq->qplib_srq.threshold;
1370acd884deSSumit Saxena 
1371acd884deSSumit Saxena 	return 0;
1372acd884deSSumit Saxena }
1373acd884deSSumit Saxena 
bnxt_re_post_srq_recv(struct ib_srq * ib_srq,const struct ib_recv_wr * wr,const struct ib_recv_wr ** bad_wr)1374acd884deSSumit Saxena int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1375acd884deSSumit Saxena 			  const struct ib_recv_wr **bad_wr)
1376acd884deSSumit Saxena {
1377acd884deSSumit Saxena 	struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq,
1378acd884deSSumit Saxena 					     ibsrq);
1379acd884deSSumit Saxena 	struct bnxt_qplib_swqe wqe = {};
1380acd884deSSumit Saxena 	unsigned long flags;
1381acd884deSSumit Saxena 	int rc = 0;
1382acd884deSSumit Saxena 
1383acd884deSSumit Saxena 	spin_lock_irqsave(&srq->lock, flags);
1384acd884deSSumit Saxena 	while (wr) {
1385acd884deSSumit Saxena 		/* Transcribe each ib_recv_wr to qplib_swqe */
1386acd884deSSumit Saxena 		wqe.num_sge = wr->num_sge;
1387acd884deSSumit Saxena 		wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
1388acd884deSSumit Saxena 		wqe.wr_id = wr->wr_id;
1389acd884deSSumit Saxena 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1390acd884deSSumit Saxena 		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1391acd884deSSumit Saxena 		if (rc) {
1392acd884deSSumit Saxena 			*bad_wr = wr;
1393acd884deSSumit Saxena 			break;
1394acd884deSSumit Saxena 		}
1395acd884deSSumit Saxena 		wr = wr->next;
1396acd884deSSumit Saxena 	}
1397acd884deSSumit Saxena 	spin_unlock_irqrestore(&srq->lock, flags);
1398acd884deSSumit Saxena 
1399acd884deSSumit Saxena 	return rc;
1400acd884deSSumit Saxena }
1401acd884deSSumit Saxena 
/* Lock both CQs of a QP (send CQ first, then receive CQ if distinct)
 * with IRQs disabled.  The returned flags must be handed back to
 * bnxt_re_unlock_cqs(), which releases in the opposite order.
 */
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	/* Avoid self-deadlock when send and recv share one CQ. */
	if (qp->rcq && qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);

	return flags;
}
1412acd884deSSumit Saxena 
/* Unlock the CQ pair taken by bnxt_re_lock_cqs(), in reverse order:
 * receive CQ (if distinct) first, then the send CQ, restoring the
 * saved IRQ @flags.
 */
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
				  unsigned long flags)
{
	if (qp->rcq && qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
1420acd884deSSumit Saxena 
1421acd884deSSumit Saxena /* Queue Pairs */
/* bnxt_re_destroy_gsi_sqp - tear down the shadow QP used for GSI traffic.
 * @qp: the GSI QP whose device holds the shadow QP/AH in gsi_ctx
 *
 * Removes the shadow QP from the active list, destroys its shadow AH
 * and HW QP, flushes its completions, frees all associated resources
 * and clears the gsi_ctx pointers.  Errors from the HW teardown calls
 * are logged but teardown continues; always returns 0.
 */
static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
	struct bnxt_re_qp *gsi_sqp;
	struct bnxt_re_ah *gsi_sah;
	struct bnxt_re_dev *rdev;
	unsigned long flags;
	int rc = 0;

	rdev = qp->rdev;
	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
	gsi_sah = rdev->gsi_ctx.gsi_sah;

	/* remove from active qp list */
	mutex_lock(&rdev->qp_lock);
	list_del(&gsi_sqp->list);
	mutex_unlock(&rdev->qp_lock);

	if (gsi_sah) {
		dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n");
		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah,
					   true);
		if (rc)
			dev_err(rdev_to_dev(rdev),
				"Destroy HW AH for shadow QP failed!\n");
		atomic_dec(&rdev->stats.rsors.ah_count);
	}

	dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n");
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
	if (rc)
		dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed\n");

	/* Clean the CQ for shadow QP completions */
	flags = bnxt_re_lock_cqs(gsi_sqp);
	bnxt_qplib_clean_qp(&gsi_sqp->qplib_qp);
	bnxt_re_unlock_cqs(gsi_sqp, flags);

	/* Free queue memory, header buffers and bookkeeping structures. */
	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
	bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &gsi_sqp->qplib_qp);
	kfree(rdev->gsi_ctx.sqp_tbl);
	kfree(gsi_sah);
	kfree(gsi_sqp);
	rdev->gsi_ctx.gsi_sqp = NULL;
	rdev->gsi_ctx.gsi_sah = NULL;
	rdev->gsi_ctx.sqp_tbl = NULL;
	atomic_dec(&rdev->stats.rsors.qp_count);

	return 0;
}
1471acd884deSSumit Saxena 
/* bnxt_re_dump_debug_stats - emit QP create/destroy perf-debug hints.
 * @rdev:       device whose perf stats are examined
 * @active_qps: number of QPs still active after the triggering event
 *
 * Only active when FW slow-path perf stats collection is enabled.
 * active_qps == 1 is treated as a hint that a test run is stopping
 * (dump accumulated QP-destroy latency), == 2 as a hint that one is
 * starting, and larger counts log a watermark every 1024 QPs.
 */
static void bnxt_re_dump_debug_stats(struct bnxt_re_dev *rdev, u32 active_qps)
{
	u32	total_qp = 0;
	u64	avg_time = 0;
	int	i;

	if (!rdev->rcfw.sp_perf_stats_enabled)
		return;

	switch (active_qps) {
	case 1:
		/* Potential hint for Test Stop */
		for (i = 0; i < RCFW_MAX_STAT_INDEX; i++) {
			if (rdev->rcfw.qp_destroy_stats[i]) {
				total_qp++;
				avg_time += rdev->rcfw.qp_destroy_stats[i];
			}
		}
		/* Fix: the old check compared the unsigned counters with
		 * ">= 0", which is always true; only log when destroy
		 * stats were actually collected.
		 */
		if (total_qp > 0 || avg_time > 0)
			dev_dbg(rdev_to_dev(rdev),
				"Perf Debug: %ps Total (%d) QP destroyed in (%ld) msec\n",
				__builtin_return_address(0), total_qp,
				(long)jiffies_to_msecs(avg_time));
		break;
	case 2:
		/* Potential hint for Test Start */
		dev_dbg(rdev_to_dev(rdev),
			"Perf Debug: %ps active_qps = %d\n",
			__builtin_return_address(0), active_qps);
		break;
	default:
		/* Potential hint to know latency of QP destroy.
		 * Average time taken for 1K QP Destroy.
		 */
		if (active_qps > 1024 && !(active_qps % 1024))
			dev_dbg(rdev_to_dev(rdev),
				"Perf Debug: %ps Active QP (%d) Watermark (%d)\n",
				__builtin_return_address(0), active_qps,
				atomic_read(&rdev->stats.rsors.max_qp_count));
		break;
	}
}
1514acd884deSSumit Saxena 
/* bnxt_re_destroy_qp - verbs entry point to destroy a QP.
 * @ib_qp: QP to destroy
 * @udata: user ABI blob (unused; presence of ib_qp->uobject decides
 *         user vs. kernel cleanup)
 *
 * Unlinks the QP from the device list, updates counters, destroys the
 * HW QP, flushes completions for kernel QPs, releases queue resources
 * and (for non-UD GSI QPs) the shadow QP, then frees the QP memory.
 * HW destroy errors are logged but teardown continues; always
 * returns 0.
 */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	unsigned long flags;
	u32 active_qps;
	int rc;

	/* Unlink from the active list and adjust per-type counters. */
	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	active_qps = atomic_dec_return(&rdev->stats.rsors.qp_count);
	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
		atomic_dec(&rdev->stats.rsors.rc_qp_count);
	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
		atomic_dec(&rdev->stats.rsors.ud_qp_count);
	mutex_unlock(&rdev->qp_lock);

	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		dev_err_ratelimited(rdev_to_dev(rdev),
				   "%s id = %d failed rc = %d\n",
				    __func__, qp->qplib_qp.id, rc);

	/* Kernel QPs: drain stale completions under the CQ locks. */
	if (!ib_qp->uobject) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_clean_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
	/* Non-UD-mode GSI QPs carry extra state: shadow QP and header
	 * buffers.
	 */
	if (ib_qp->qp_type == IB_QPT_GSI &&
	    rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
		if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL &&
		    rdev->gsi_ctx.gsi_sqp) {
			bnxt_re_destroy_gsi_sqp(qp);
		}
		bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &qp->qplib_qp);
	}

	/* Release pinned user memory for user-mode QPs. */
	if (qp->rumem && !IS_ERR(qp->rumem))
		ib_umem_release(qp->rumem);
	if (qp->sumem && !IS_ERR(qp->sumem))
		ib_umem_release(qp->sumem);
	kfree(qp);

	bnxt_re_dump_debug_stats(rdev, active_qps);

	return 0;
}
1564acd884deSSumit Saxena 
__from_ib_qp_type(enum ib_qp_type type)1565acd884deSSumit Saxena static u8 __from_ib_qp_type(enum ib_qp_type type)
1566acd884deSSumit Saxena {
1567acd884deSSumit Saxena 	switch (type) {
1568acd884deSSumit Saxena 	case IB_QPT_GSI:
1569acd884deSSumit Saxena 		return CMDQ_CREATE_QP1_TYPE_GSI;
1570acd884deSSumit Saxena 	case IB_QPT_RC:
1571acd884deSSumit Saxena 		return CMDQ_CREATE_QP_TYPE_RC;
1572acd884deSSumit Saxena 	case IB_QPT_UD:
1573acd884deSSumit Saxena 		return CMDQ_CREATE_QP_TYPE_UD;
1574acd884deSSumit Saxena 	case IB_QPT_RAW_ETHERTYPE:
1575acd884deSSumit Saxena 		return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
1576acd884deSSumit Saxena 	default:
1577acd884deSSumit Saxena 		return IB_QPT_MAX;
1578acd884deSSumit Saxena 	}
1579acd884deSSumit Saxena }
1580acd884deSSumit Saxena 
_get_swqe_sz(int nsge)1581acd884deSSumit Saxena static u16 _get_swqe_sz(int nsge)
1582acd884deSSumit Saxena {
1583acd884deSSumit Saxena 	return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1584acd884deSSumit Saxena }
1585acd884deSSumit Saxena 
bnxt_re_get_swqe_size(int ilsize,int nsge)1586acd884deSSumit Saxena static int bnxt_re_get_swqe_size(int ilsize, int nsge)
1587acd884deSSumit Saxena {
1588acd884deSSumit Saxena 	u16 wqe_size, calc_ils;
1589acd884deSSumit Saxena 
1590acd884deSSumit Saxena 	wqe_size = _get_swqe_sz(nsge);
1591acd884deSSumit Saxena 	if (ilsize) {
1592acd884deSSumit Saxena 		calc_ils = (sizeof(struct sq_send_hdr) + ilsize);
1593acd884deSSumit Saxena 		wqe_size = max_t(int, calc_ils, wqe_size);
1594acd884deSSumit Saxena 		wqe_size = ALIGN(wqe_size, 32);
1595acd884deSSumit Saxena 	}
1596acd884deSSumit Saxena 	return wqe_size;
1597acd884deSSumit Saxena }
1598acd884deSSumit Saxena 
/* bnxt_re_setup_swqe_size - choose the SQ WQE size for a new QP.
 * @qp:        QP whose qplib SQ is being sized (sq->max_sge already set)
 * @init_attr: creation attributes; cap.max_inline_data may be updated
 *             with the inline capacity actually provided
 *
 * Sizes the WQE from the SGE count and requested inline-data length,
 * rejecting requests beyond the device's SGE capacity.  Returns 0 on
 * success or -EINVAL if the request cannot fit.
 */
static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
				   struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int align, ilsize;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = rdev->dev_attr;

	/* Inline data is stored after the send header; round the
	 * requested inline length up to header granularity.
	 */
	align = sizeof(struct sq_send_hdr);
	ilsize = ALIGN(init_attr->cap.max_inline_data, align);

	sq->wqe_size = bnxt_re_get_swqe_size(ilsize, sq->max_sge);
	if (sq->wqe_size > _get_swqe_sz(dev_attr->max_qp_sges))
		return -EINVAL;
	/* For Cu/Wh and gen p5 backward compatibility mode
	 * wqe size is fixed to 128 bytes
	 */
	if (sq->wqe_size < _get_swqe_sz(dev_attr->max_qp_sges) &&
	    qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		sq->wqe_size = _get_swqe_sz(dev_attr->max_qp_sges);

	if (init_attr->cap.max_inline_data) {
		/* Report back the inline capacity the chosen WQE size
		 * actually provides; in static mode the SGE count is
		 * recomputed from the same space.
		 */
		qplqp->max_inline_data = sq->wqe_size -
					 sizeof(struct sq_send_hdr);
		init_attr->cap.max_inline_data = qplqp->max_inline_data;
		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			sq->max_sge = qplqp->max_inline_data /
				      sizeof(struct sq_sge);
	}

	return 0;
}
1637acd884deSSumit Saxena 
/* bnxt_re_init_user_qp - map user memory backing a user-mode QP.
 * @rdev:  bnxt_re device
 * @pd:    protection domain owning the QP
 * @qp:    driver QP under creation (qplib queues already sized)
 * @udata: user ABI blob carrying struct bnxt_re_qp_req
 *
 * Copies the user request, pins the SQ buffer at ureq.qpsva (including
 * PSN/MSN search memory for RC QPs) and, when the QP has no SRQ, the
 * RQ buffer at ureq.qprva, recording the scatter lists in the qplib
 * queue sg-info.  On RQ failure the SQ umem is unwound.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev,
				struct bnxt_re_pd *pd, struct bnxt_re_qp *qp,
				struct ib_udata *udata)
{
	struct bnxt_qplib_sg_info *sginfo;
	struct bnxt_qplib_qp *qplib_qp;
	struct bnxt_re_ucontext *cntx;
	struct ib_ucontext *context;
	struct bnxt_re_qp_req ureq;
	struct ib_umem *umem;
	int rc, bytes = 0;
	int psn_nume;
	int psn_sz;

	qplib_qp = &qp->qplib_qp;
	context = pd->ibpd.uobject->context;
	cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
	sginfo = &qplib_qp->sq.sginfo;

	/* Older userspace may hand in a shorter request; warn but
	 * continue with the common prefix.
	 */
	if (udata->inlen < sizeof(ureq))
		dev_warn(rdev_to_dev(rdev),
			 "Update the library ulen %d klen %d\n",
			 (unsigned int)udata->inlen,
			 (unsigned int)sizeof(ureq));

	rc = ib_copy_from_udata(&ureq, udata,
				min(udata->inlen, sizeof(ureq)));
	if (rc)
		return rc;

	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		/* PSN entry layout depends on chip generation; HW
		 * retransmission uses MSN search entries instead.
		 */
		psn_sz = _is_chip_gen_p5_p7(rdev->chip_ctx) ?
				sizeof(struct sq_psn_search_ext) :
				sizeof(struct sq_psn_search);
		if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
			psn_sz = sizeof(struct sq_msn_search);
		psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			    qplib_qp->sq.max_wqe :
			    ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
			     sizeof(struct bnxt_qplib_sge));
		/* HW retransmission requires a power-of-two MSN table. */
		if (BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
			psn_nume = roundup_pow_of_two(psn_nume);

		bytes += (psn_nume * psn_sz);
	}
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get_compat(rdev, context, udata, ureq.qpsva, bytes,
				  IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n",
			__func__, PTR_ERR(umem));
		return PTR_ERR(umem);
	}

	qp->sumem = umem;
	/* pgsize and pgshft were initialize already. */
	sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
	sginfo->npages = ib_umem_num_pages_compat(umem);
	qplib_qp->qp_handle = ureq.qp_handle;

	/* QPs without an SRQ carry their own receive queue memory. */
	if (!qp->qplib_qp.srq) {
		sginfo = &qplib_qp->rq.sginfo;
		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get_compat(rdev,
					  context, udata, ureq.qprva, bytes,
					  IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(umem)) {
			dev_err(rdev_to_dev(rdev),
				"%s: ib_umem_get failed ret =%ld\n",
				__func__, PTR_ERR(umem));
			goto rqfail;
		}
		qp->rumem = umem;
		/* pgsize and pgshft were initialize already. */
		sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
		sginfo->npages = ib_umem_num_pages_compat(umem);
	}

	qplib_qp->dpi = &cntx->dpi;	/* user doorbell page */
	qplib_qp->is_user = true;

	return 0;
rqfail:
	/* Unwind the SQ mapping; umem still holds the RQ error code. */
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	qplib_qp->sq.sginfo.sghead = NULL;
	qplib_qp->sq.sginfo.nmap = 0;

	return PTR_ERR(umem);
}
1731acd884deSSumit Saxena 
bnxt_re_create_shadow_qp_ah(struct bnxt_re_pd * pd,struct bnxt_qplib_res * qp1_res,struct bnxt_qplib_qp * qp1_qp)1732acd884deSSumit Saxena static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah(struct bnxt_re_pd *pd,
1733acd884deSSumit Saxena 					       struct bnxt_qplib_res *qp1_res,
1734acd884deSSumit Saxena 					       struct bnxt_qplib_qp *qp1_qp)
1735acd884deSSumit Saxena {
1736acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = pd->rdev;
1737acd884deSSumit Saxena 	struct bnxt_re_ah *ah;
1738acd884deSSumit Saxena 	union ib_gid sgid;
1739acd884deSSumit Saxena 	int rc;
1740acd884deSSumit Saxena 
1741acd884deSSumit Saxena 	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
1742acd884deSSumit Saxena 	if (!ah) {
1743acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Allocate Address Handle failed!\n");
1744acd884deSSumit Saxena 		return NULL;
1745acd884deSSumit Saxena 	}
1746acd884deSSumit Saxena 	memset(ah, 0, sizeof(*ah));
1747acd884deSSumit Saxena 	ah->rdev = rdev;
1748acd884deSSumit Saxena 	ah->qplib_ah.pd = &pd->qplib_pd;
1749acd884deSSumit Saxena 
1750acd884deSSumit Saxena 	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1751acd884deSSumit Saxena 	if (rc)
1752acd884deSSumit Saxena 		goto fail;
1753acd884deSSumit Saxena 
1754acd884deSSumit Saxena 	/* supply the dgid data same as sgid */
1755acd884deSSumit Saxena 	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1756acd884deSSumit Saxena 	       sizeof(union ib_gid));
1757acd884deSSumit Saxena 	ah->qplib_ah.sgid_index = 0;
1758acd884deSSumit Saxena 
1759acd884deSSumit Saxena 	ah->qplib_ah.traffic_class = 0;
1760acd884deSSumit Saxena 	ah->qplib_ah.flow_label = 0;
1761acd884deSSumit Saxena 	ah->qplib_ah.hop_limit = 1;
1762acd884deSSumit Saxena 	ah->qplib_ah.sl = 0;
1763acd884deSSumit Saxena 	/* Have DMAC same as SMAC */
1764acd884deSSumit Saxena 	ether_addr_copy(ah->qplib_ah.dmac, rdev->dev_addr);
1765acd884deSSumit Saxena 	dev_dbg(rdev_to_dev(rdev), "ah->qplib_ah.dmac = %x:%x:%x:%x:%x:%x\n",
1766acd884deSSumit Saxena 		ah->qplib_ah.dmac[0], ah->qplib_ah.dmac[1], ah->qplib_ah.dmac[2],
1767acd884deSSumit Saxena 		ah->qplib_ah.dmac[3], ah->qplib_ah.dmac[4], ah->qplib_ah.dmac[5]);
1768acd884deSSumit Saxena 
1769acd884deSSumit Saxena 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, true);
1770acd884deSSumit Saxena 	if (rc) {
1771acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev),
1772acd884deSSumit Saxena 			"Allocate HW AH for Shadow QP failed!\n");
1773acd884deSSumit Saxena 		goto fail;
1774acd884deSSumit Saxena 	}
1775acd884deSSumit Saxena 	dev_dbg(rdev_to_dev(rdev), "AH ID = %d\n", ah->qplib_ah.id);
1776acd884deSSumit Saxena 	atomic_inc(&rdev->stats.rsors.ah_count);
1777acd884deSSumit Saxena 
1778acd884deSSumit Saxena 	return ah;
1779acd884deSSumit Saxena fail:
1780acd884deSSumit Saxena 	kfree(ah);
1781acd884deSSumit Saxena 	return NULL;
1782acd884deSSumit Saxena }
1783acd884deSSumit Saxena 
bnxt_re_update_shadow_ah(struct bnxt_re_dev * rdev)1784acd884deSSumit Saxena void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev)
1785acd884deSSumit Saxena {
1786acd884deSSumit Saxena 	struct bnxt_re_qp *gsi_qp;
1787acd884deSSumit Saxena 	struct bnxt_re_ah *sah;
1788acd884deSSumit Saxena 	struct bnxt_re_pd *pd;
1789acd884deSSumit Saxena 	struct ib_pd *ib_pd;
1790acd884deSSumit Saxena 	int rc;
1791acd884deSSumit Saxena 
1792acd884deSSumit Saxena 	if (!rdev)
1793acd884deSSumit Saxena 		return;
1794acd884deSSumit Saxena 
1795acd884deSSumit Saxena 	sah = rdev->gsi_ctx.gsi_sah;
1796acd884deSSumit Saxena 
1797acd884deSSumit Saxena 	dev_dbg(rdev_to_dev(rdev), "Updating the AH\n");
1798acd884deSSumit Saxena 	if (sah) {
1799acd884deSSumit Saxena 		/* Check if the AH created with current mac address */
1800acd884deSSumit Saxena 		if (!compare_ether_header(sah->qplib_ah.dmac, rdev->dev_addr)) {
1801acd884deSSumit Saxena 			dev_dbg(rdev_to_dev(rdev),
1802acd884deSSumit Saxena 				"Not modifying shadow AH during AH update\n");
1803acd884deSSumit Saxena 			return;
1804acd884deSSumit Saxena 		}
1805acd884deSSumit Saxena 
1806acd884deSSumit Saxena 		gsi_qp = rdev->gsi_ctx.gsi_qp;
1807acd884deSSumit Saxena 		ib_pd = gsi_qp->ib_qp.pd;
1808acd884deSSumit Saxena 		pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
1809acd884deSSumit Saxena 		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
1810acd884deSSumit Saxena 					   &sah->qplib_ah, false);
1811acd884deSSumit Saxena 		if (rc) {
1812acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev),
1813acd884deSSumit Saxena 				"Failed to destroy shadow AH during AH update\n");
1814acd884deSSumit Saxena 			return;
1815acd884deSSumit Saxena 		}
1816acd884deSSumit Saxena 		atomic_dec(&rdev->stats.rsors.ah_count);
1817acd884deSSumit Saxena 		kfree(sah);
1818acd884deSSumit Saxena 		rdev->gsi_ctx.gsi_sah = NULL;
1819acd884deSSumit Saxena 
1820acd884deSSumit Saxena 		sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1821acd884deSSumit Saxena 						  &gsi_qp->qplib_qp);
1822acd884deSSumit Saxena 		if (!sah) {
1823acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev),
1824acd884deSSumit Saxena 				"Failed to update AH for ShadowQP\n");
1825acd884deSSumit Saxena 			return;
1826acd884deSSumit Saxena 		}
1827acd884deSSumit Saxena 		rdev->gsi_ctx.gsi_sah = sah;
1828acd884deSSumit Saxena 		atomic_inc(&rdev->stats.rsors.ah_count);
1829acd884deSSumit Saxena 	}
1830acd884deSSumit Saxena }
1831acd884deSSumit Saxena 
bnxt_re_create_shadow_qp(struct bnxt_re_pd * pd,struct bnxt_qplib_res * qp1_res,struct bnxt_qplib_qp * qp1_qp)1832acd884deSSumit Saxena static struct bnxt_re_qp *bnxt_re_create_shadow_qp(struct bnxt_re_pd *pd,
1833acd884deSSumit Saxena 					    struct bnxt_qplib_res *qp1_res,
1834acd884deSSumit Saxena 					    struct bnxt_qplib_qp *qp1_qp)
1835acd884deSSumit Saxena {
1836acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = pd->rdev;
1837acd884deSSumit Saxena 	struct bnxt_re_qp *qp;
1838acd884deSSumit Saxena 	int rc;
1839acd884deSSumit Saxena 
1840acd884deSSumit Saxena 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1841acd884deSSumit Saxena 	if (!qp) {
1842acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Allocate internal UD QP failed!\n");
1843acd884deSSumit Saxena 		return NULL;
1844acd884deSSumit Saxena 	}
1845acd884deSSumit Saxena 	memset(qp, 0, sizeof(*qp));
1846acd884deSSumit Saxena 	qp->rdev = rdev;
1847acd884deSSumit Saxena 
1848acd884deSSumit Saxena 	/* Initialize the shadow QP structure from the QP1 values */
1849acd884deSSumit Saxena 	ether_addr_copy(qp->qplib_qp.smac, rdev->dev_addr);
1850acd884deSSumit Saxena 	qp->qplib_qp.pd = &pd->qplib_pd;
1851acd884deSSumit Saxena 	qp->qplib_qp.qp_handle = (u64)&qp->qplib_qp;
1852acd884deSSumit Saxena 	qp->qplib_qp.type = IB_QPT_UD;
1853acd884deSSumit Saxena 
1854acd884deSSumit Saxena 	qp->qplib_qp.max_inline_data = 0;
1855acd884deSSumit Saxena 	qp->qplib_qp.sig_type = true;
1856acd884deSSumit Saxena 
1857acd884deSSumit Saxena 	/* Shadow QP SQ depth should be same as QP1 RQ depth */
1858acd884deSSumit Saxena 	qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size(0, 6);
1859acd884deSSumit Saxena 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1860acd884deSSumit Saxena 	qp->qplib_qp.sq.max_sge = 2;
1861acd884deSSumit Saxena 	/* Q full delta can be 1 since it is internal QP */
1862acd884deSSumit Saxena 	qp->qplib_qp.sq.q_full_delta = 1;
1863acd884deSSumit Saxena 	qp->qplib_qp.sq.sginfo.pgsize = PAGE_SIZE;
1864acd884deSSumit Saxena 	qp->qplib_qp.sq.sginfo.pgshft = PAGE_SHIFT;
1865acd884deSSumit Saxena 
1866acd884deSSumit Saxena 	qp->qplib_qp.scq = qp1_qp->scq;
1867acd884deSSumit Saxena 	qp->qplib_qp.rcq = qp1_qp->rcq;
1868acd884deSSumit Saxena 
1869acd884deSSumit Saxena 	qp->qplib_qp.rq.wqe_size = _max_rwqe_sz(6); /* 128 Byte wqe size */
1870acd884deSSumit Saxena 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1871acd884deSSumit Saxena 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1872acd884deSSumit Saxena 	qp->qplib_qp.rq.sginfo.pgsize = PAGE_SIZE;
1873acd884deSSumit Saxena 	qp->qplib_qp.rq.sginfo.pgshft = PAGE_SHIFT;
1874acd884deSSumit Saxena 	/* Q full delta can be 1 since it is internal QP */
1875acd884deSSumit Saxena 	qp->qplib_qp.rq.q_full_delta = 1;
1876acd884deSSumit Saxena 	qp->qplib_qp.mtu = qp1_qp->mtu;
1877acd884deSSumit Saxena 	qp->qplib_qp.dpi = &rdev->dpi_privileged;
1878acd884deSSumit Saxena 
1879acd884deSSumit Saxena 	rc = bnxt_qplib_alloc_hdr_buf(qp1_res, &qp->qplib_qp, 0,
1880acd884deSSumit Saxena 				      BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6);
1881acd884deSSumit Saxena 	if (rc)
1882acd884deSSumit Saxena 		goto fail;
1883acd884deSSumit Saxena 
1884acd884deSSumit Saxena 	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1885acd884deSSumit Saxena 	if (rc) {
1886acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "create HW QP failed!\n");
1887acd884deSSumit Saxena 		goto qp_fail;
1888acd884deSSumit Saxena 	}
1889acd884deSSumit Saxena 
1890acd884deSSumit Saxena 	dev_dbg(rdev_to_dev(rdev), "Created shadow QP with ID = %d\n",
1891acd884deSSumit Saxena 		qp->qplib_qp.id);
1892acd884deSSumit Saxena 	spin_lock_init(&qp->sq_lock);
1893acd884deSSumit Saxena 	INIT_LIST_HEAD(&qp->list);
1894acd884deSSumit Saxena 	mutex_lock(&rdev->qp_lock);
1895acd884deSSumit Saxena 	list_add_tail(&qp->list, &rdev->qp_list);
1896acd884deSSumit Saxena 	atomic_inc(&rdev->stats.rsors.qp_count);
1897acd884deSSumit Saxena 	mutex_unlock(&rdev->qp_lock);
1898acd884deSSumit Saxena 	return qp;
1899acd884deSSumit Saxena qp_fail:
1900acd884deSSumit Saxena 	bnxt_qplib_free_hdr_buf(qp1_res, &qp->qplib_qp);
1901acd884deSSumit Saxena fail:
1902acd884deSSumit Saxena 	kfree(qp);
1903acd884deSSumit Saxena 	return NULL;
1904acd884deSSumit Saxena }
1905acd884deSSumit Saxena 
bnxt_re_init_rq_attr(struct bnxt_re_qp * qp,struct ib_qp_init_attr * init_attr,void * cntx)1906acd884deSSumit Saxena static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1907acd884deSSumit Saxena 				struct ib_qp_init_attr *init_attr, void *cntx)
1908acd884deSSumit Saxena {
1909acd884deSSumit Saxena 	struct bnxt_qplib_dev_attr *dev_attr;
1910acd884deSSumit Saxena 	struct bnxt_qplib_qp *qplqp;
1911acd884deSSumit Saxena 	struct bnxt_re_dev *rdev;
1912acd884deSSumit Saxena 	struct bnxt_qplib_q *rq;
1913acd884deSSumit Saxena 	int entries;
1914acd884deSSumit Saxena 
1915acd884deSSumit Saxena 	rdev = qp->rdev;
1916acd884deSSumit Saxena 	qplqp = &qp->qplib_qp;
1917acd884deSSumit Saxena 	rq = &qplqp->rq;
1918acd884deSSumit Saxena 	dev_attr = rdev->dev_attr;
1919acd884deSSumit Saxena 
1920acd884deSSumit Saxena 	if (init_attr->srq) {
1921acd884deSSumit Saxena 		struct bnxt_re_srq *srq;
1922acd884deSSumit Saxena 
1923acd884deSSumit Saxena 		srq = to_bnxt_re(init_attr->srq, struct bnxt_re_srq, ibsrq);
1924acd884deSSumit Saxena 		if (!srq) {
1925acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev), "SRQ not found\n");
1926acd884deSSumit Saxena 			return -EINVAL;
1927acd884deSSumit Saxena 		}
1928acd884deSSumit Saxena 		qplqp->srq = &srq->qplib_srq;
1929acd884deSSumit Saxena 		rq->max_wqe = 0;
1930acd884deSSumit Saxena 	} else {
1931acd884deSSumit Saxena 		rq->max_sge = init_attr->cap.max_recv_sge;
1932acd884deSSumit Saxena 		if (rq->max_sge > dev_attr->max_qp_sges)
1933acd884deSSumit Saxena 			rq->max_sge = dev_attr->max_qp_sges;
1934acd884deSSumit Saxena 		init_attr->cap.max_recv_sge = rq->max_sge;
1935acd884deSSumit Saxena 		rq->wqe_size = bnxt_re_get_rwqe_size(qplqp, rq->max_sge,
1936acd884deSSumit Saxena 						     dev_attr->max_qp_sges);
1937acd884deSSumit Saxena 
1938acd884deSSumit Saxena 		/* Allocate 1 more than what's provided so posting max doesn't
1939acd884deSSumit Saxena 		   mean empty */
1940acd884deSSumit Saxena 		entries = init_attr->cap.max_recv_wr + 1;
1941acd884deSSumit Saxena 		entries = bnxt_re_init_depth(entries, cntx);
1942acd884deSSumit Saxena 		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1943acd884deSSumit Saxena 		rq->q_full_delta = 0;
1944acd884deSSumit Saxena 		rq->sginfo.pgsize = PAGE_SIZE;
1945acd884deSSumit Saxena 		rq->sginfo.pgshft = PAGE_SHIFT;
1946acd884deSSumit Saxena 	}
1947acd884deSSumit Saxena 
1948acd884deSSumit Saxena 	return 0;
1949acd884deSSumit Saxena }
1950acd884deSSumit Saxena 
bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp * qp)1951acd884deSSumit Saxena static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1952acd884deSSumit Saxena {
1953acd884deSSumit Saxena 	struct bnxt_qplib_dev_attr *dev_attr;
1954acd884deSSumit Saxena 	struct bnxt_qplib_qp *qplqp;
1955acd884deSSumit Saxena 	struct bnxt_re_dev *rdev;
1956acd884deSSumit Saxena 
1957acd884deSSumit Saxena 	rdev = qp->rdev;
1958acd884deSSumit Saxena 	qplqp = &qp->qplib_qp;
1959acd884deSSumit Saxena 	dev_attr = rdev->dev_attr;
1960acd884deSSumit Saxena 
1961acd884deSSumit Saxena 	if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD)
1962acd884deSSumit Saxena 		qplqp->rq.max_sge = dev_attr->max_qp_sges;
1963acd884deSSumit Saxena }
1964acd884deSSumit Saxena 
/*
 * bnxt_re_init_sq_attr - Size the send queue of a QP being created.
 *
 * Clamps the requested SGE count to the device limit, picks the SQ WQE
 * size, and computes the WQE depth and queue-full delta.  A NULL cntx
 * means a kernel (non-userspace) consumer, which is the only case where
 * the configfs min_tx_depth override applies.
 *
 * Returns 0 on success or the error from bnxt_re_setup_swqe_size().
 */
static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr,
				void *cntx)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int diff = 0;
	int entries;
	int rc;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = rdev->dev_attr;

	/* Clamp SGEs to the device maximum and report the clamped value
	 * back to the caller through init_attr.
	 */
	sq->max_sge = init_attr->cap.max_send_sge;
	if (sq->max_sge > dev_attr->max_qp_sges) {
		sq->max_sge = dev_attr->max_qp_sges;
		init_attr->cap.max_send_sge = sq->max_sge;
	}
	rc = bnxt_re_setup_swqe_size(qp, init_attr);
	if (rc)
		return rc;
	/*
	 * Change the SQ depth if user has requested minimum using
	 * configfs. Only supported for kernel consumers. Setting
	 * min_tx_depth to 4096 to handle iser SQ full condition
	 * in most of the newer OS distros
	 */
	entries = init_attr->cap.max_send_wr;
	if (!cntx && rdev->min_tx_depth && init_attr->qp_type != IB_QPT_GSI) {
		/*
		 * If users specify any value greater than 1 use min_tx_depth
		 * provided by user for comparison. Else, compare it with the
		 * BNXT_RE_MIN_KERNEL_QP_TX_DEPTH and adjust it accordingly.
		 */
		if (rdev->min_tx_depth > 1 && entries < rdev->min_tx_depth)
			entries = rdev->min_tx_depth;
		else if (entries < BNXT_RE_MIN_KERNEL_QP_TX_DEPTH)
			entries = BNXT_RE_MIN_KERNEL_QP_TX_DEPTH;
	}
	/* diff is extra depth headroom that depends on the chip generation
	 * and consumer context — presumably 0 on gen-p5 (see comment below);
	 * TODO confirm against bnxt_re_get_diff().
	 */
	diff = bnxt_re_get_diff(cntx, rdev->chip_ctx);
	entries = bnxt_re_init_depth(entries + diff + 1, cntx);
	sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
	sq->q_full_delta = diff + 1;
	/*
	 * Reserving one slot for Phantom WQE. Application can
	 * post one extra entry in this case. But allowing this to avoid
	 * unexpected Queue full condition
	 */
	sq->q_full_delta -= 1; /* becomes 0 for gen-p5 */
	sq->sginfo.pgsize = PAGE_SIZE;
	sq->sginfo.pgshft = PAGE_SHIFT;
	return 0;
}
2022acd884deSSumit Saxena 
bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp * qp,struct ib_qp_init_attr * init_attr,void * cntx)2023acd884deSSumit Saxena static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
2024acd884deSSumit Saxena 				       struct ib_qp_init_attr *init_attr,
2025acd884deSSumit Saxena 				       void *cntx)
2026acd884deSSumit Saxena {
2027acd884deSSumit Saxena 	struct bnxt_qplib_dev_attr *dev_attr;
2028acd884deSSumit Saxena 	struct bnxt_qplib_qp *qplqp;
2029acd884deSSumit Saxena 	struct bnxt_re_dev *rdev;
2030acd884deSSumit Saxena 	int entries;
2031acd884deSSumit Saxena 
2032acd884deSSumit Saxena 	rdev = qp->rdev;
2033acd884deSSumit Saxena 	qplqp = &qp->qplib_qp;
2034acd884deSSumit Saxena 	dev_attr = rdev->dev_attr;
2035acd884deSSumit Saxena 
2036acd884deSSumit Saxena 	if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
2037acd884deSSumit Saxena 		entries = init_attr->cap.max_send_wr + 1;
2038acd884deSSumit Saxena 		entries = bnxt_re_init_depth(entries, cntx);
2039acd884deSSumit Saxena 		qplqp->sq.max_wqe = min_t(u32, entries,
2040acd884deSSumit Saxena 					  dev_attr->max_qp_wqes + 1);
2041acd884deSSumit Saxena 		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
2042acd884deSSumit Saxena 					 init_attr->cap.max_send_wr;
2043acd884deSSumit Saxena 		qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
2044acd884deSSumit Saxena 		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
2045acd884deSSumit Saxena 			qplqp->sq.max_sge = dev_attr->max_qp_sges;
2046acd884deSSumit Saxena 	}
2047acd884deSSumit Saxena }
2048acd884deSSumit Saxena 
bnxt_re_init_qp_type(struct bnxt_re_dev * rdev,struct ib_qp_init_attr * init_attr)2049acd884deSSumit Saxena static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
2050acd884deSSumit Saxena 				struct ib_qp_init_attr *init_attr)
2051acd884deSSumit Saxena {
2052acd884deSSumit Saxena 	struct bnxt_qplib_chip_ctx *chip_ctx;
2053acd884deSSumit Saxena 	struct bnxt_re_gsi_context *gsi_ctx;
2054acd884deSSumit Saxena 	int qptype;
2055acd884deSSumit Saxena 
2056acd884deSSumit Saxena 	chip_ctx = rdev->chip_ctx;
2057acd884deSSumit Saxena 	gsi_ctx = &rdev->gsi_ctx;
2058acd884deSSumit Saxena 
2059acd884deSSumit Saxena 	qptype = __from_ib_qp_type(init_attr->qp_type);
2060acd884deSSumit Saxena 	if (qptype == IB_QPT_MAX) {
2061acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported\n",
2062acd884deSSumit Saxena 			qptype);
2063acd884deSSumit Saxena 		qptype = -EINVAL;
2064acd884deSSumit Saxena 		goto out;
2065acd884deSSumit Saxena 	}
2066acd884deSSumit Saxena 
2067acd884deSSumit Saxena 	if (_is_chip_gen_p5_p7(chip_ctx) && init_attr->qp_type == IB_QPT_GSI) {
2068acd884deSSumit Saxena 		/* For Thor always force UD mode. */
2069acd884deSSumit Saxena 		qptype = CMDQ_CREATE_QP_TYPE_GSI;
2070acd884deSSumit Saxena 		gsi_ctx->gsi_qp_mode = BNXT_RE_GSI_MODE_UD;
2071acd884deSSumit Saxena 	}
2072acd884deSSumit Saxena out:
2073acd884deSSumit Saxena 	return qptype;
2074acd884deSSumit Saxena }
2075acd884deSSumit Saxena 
/* Return the WQE mode configured in the chip context for new QPs. */
static int bnxt_re_init_qp_wqe_mode(struct bnxt_re_dev *rdev)
{
	return rdev->chip_ctx->modes.wqe_mode;
}
2080acd884deSSumit Saxena 
/*
 * bnxt_re_init_qp_attr - Populate the qplib QP structure from the verbs
 * create-QP request.
 *
 * Sets up the miscellaneous QP parameters (PD, handle, signalling, type,
 * WQE mode, SMAC, MTU, privileged DPI), resolves the send/receive CQs,
 * sizes the RQ/SRQ and SQ (with GSI-specific adjustments), and — for a
 * userspace QP (udata != NULL) — maps the user buffers, which also
 * updates the DPI and qp_handle.
 *
 * Returns 0 on success or a negative errno.  Note: an unsupported
 * create_flags value returns -EOPNOTSUPP directly rather than via the
 * common "out" label.
 */
static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_re_ucontext *cntx = NULL;
	struct ib_ucontext *context;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_cq *cq;
	int rc = 0, qptype;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = rdev->dev_attr;

	/* A non-NULL udata identifies a userspace consumer; fetch its
	 * ucontext for depth/DPI handling below.
	 */
	if (udata) {
		context = pd->ibpd.uobject->context;
		cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
	}

	/* Setup misc params */
	qplqp->is_user = false;
	qplqp->pd = &pd->qplib_pd;
	qplqp->qp_handle = (u64)qplqp;
	qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
			    true : false);
	qptype = bnxt_re_init_qp_type(rdev, init_attr);
	if (qptype < 0) {
		rc = qptype;
		goto out;
	}
	qplqp->type = (u8)qptype;
	qplqp->wqe_mode = bnxt_re_init_qp_wqe_mode(rdev);
	ether_addr_copy(qplqp->smac, rdev->dev_addr);

	if (init_attr->qp_type == IB_QPT_RC) {
		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
	}
	/* Derive the QP MTU from the netdev's current interface MTU. */
	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(if_getmtu(rdev->netdev)));
	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
	/* No create flags are supported by this driver. */
	if (init_attr->create_flags) {
		dev_dbg(rdev_to_dev(rdev),
			"QP create flags 0x%x not supported\n",
			init_attr->create_flags);
		return -EOPNOTSUPP;
	}

	/* Setup CQs */
	if (init_attr->send_cq) {
		cq = to_bnxt_re(init_attr->send_cq, struct bnxt_re_cq, ibcq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Send CQ not found\n");
			rc = -EINVAL;
			goto out;
		}
		qplqp->scq = &cq->qplib_cq;
		qp->scq = cq;
	}

	if (init_attr->recv_cq) {
		cq = to_bnxt_re(init_attr->recv_cq, struct bnxt_re_cq, ibcq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Receive CQ not found\n");
			rc = -EINVAL;
			goto out;
		}
		qplqp->rcq = &cq->qplib_cq;
		qp->rcq = cq;
	}

	/* Setup RQ/SRQ */
	rc = bnxt_re_init_rq_attr(qp, init_attr, cntx);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_rq_attr(qp);

	/* Setup SQ */
	rc = bnxt_re_init_sq_attr(qp, init_attr, cntx);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_sq_attr(qp, init_attr, cntx);

	if (udata) /* This will update DPI and qp_handle */
		rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
out:
	return rc;
}
2172acd884deSSumit Saxena 
bnxt_re_create_shadow_gsi(struct bnxt_re_qp * qp,struct bnxt_re_pd * pd)2173acd884deSSumit Saxena static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
2174acd884deSSumit Saxena 				     struct bnxt_re_pd *pd)
2175acd884deSSumit Saxena {
2176acd884deSSumit Saxena 	struct bnxt_re_sqp_entries *sqp_tbl = NULL;
2177acd884deSSumit Saxena 	struct bnxt_re_dev *rdev;
2178acd884deSSumit Saxena 	struct bnxt_re_qp *sqp;
2179acd884deSSumit Saxena 	struct bnxt_re_ah *sah;
2180acd884deSSumit Saxena 	int rc = 0;
2181acd884deSSumit Saxena 
2182acd884deSSumit Saxena 	rdev = qp->rdev;
2183acd884deSSumit Saxena 	/* Create a shadow QP to handle the QP1 traffic */
2184acd884deSSumit Saxena 	sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES,
2185acd884deSSumit Saxena 			  GFP_KERNEL);
2186acd884deSSumit Saxena 	if (!sqp_tbl)
2187acd884deSSumit Saxena 		return -ENOMEM;
2188acd884deSSumit Saxena 	rdev->gsi_ctx.sqp_tbl = sqp_tbl;
2189acd884deSSumit Saxena 
2190acd884deSSumit Saxena 	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
2191acd884deSSumit Saxena 	if (!sqp) {
2192acd884deSSumit Saxena 		rc = -ENODEV;
2193acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev),
2194acd884deSSumit Saxena 			"Failed to create Shadow QP for QP1\n");
2195acd884deSSumit Saxena 		goto out;
2196acd884deSSumit Saxena 	}
2197acd884deSSumit Saxena 	rdev->gsi_ctx.gsi_sqp = sqp;
2198acd884deSSumit Saxena 
2199acd884deSSumit Saxena 	sqp->rcq = qp->rcq;
2200acd884deSSumit Saxena 	sqp->scq = qp->scq;
2201acd884deSSumit Saxena 	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
2202acd884deSSumit Saxena 			&qp->qplib_qp);
2203acd884deSSumit Saxena 	if (!sah) {
2204acd884deSSumit Saxena 		bnxt_qplib_destroy_qp(&rdev->qplib_res,
2205acd884deSSumit Saxena 				&sqp->qplib_qp);
2206acd884deSSumit Saxena 		rc = -ENODEV;
2207acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev),
2208acd884deSSumit Saxena 				"Failed to create AH entry for ShadowQP\n");
2209acd884deSSumit Saxena 		goto out;
2210acd884deSSumit Saxena 	}
2211acd884deSSumit Saxena 	rdev->gsi_ctx.gsi_sah = sah;
2212acd884deSSumit Saxena 
2213acd884deSSumit Saxena 	return 0;
2214acd884deSSumit Saxena out:
2215acd884deSSumit Saxena 	kfree(sqp_tbl);
2216acd884deSSumit Saxena 	return rc;
2217acd884deSSumit Saxena }
2218acd884deSSumit Saxena 
/* RQ header-buffer size for QP1: the larger V2 size only in ALL mode. */
static int __get_rq_hdr_buf_size(u8 gsi_mode)
{
	if (gsi_mode == BNXT_RE_GSI_MODE_ALL)
		return BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	return BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE;
}
2225acd884deSSumit Saxena 
/* SQ header-buffer size for QP1: the V1 size only in RoCE-v1 mode. */
static int __get_sq_hdr_buf_size(u8 gsi_mode)
{
	if (gsi_mode == BNXT_RE_GSI_MODE_ROCE_V1)
		return BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE;
	return BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
}
2232acd884deSSumit Saxena 
bnxt_re_create_gsi_qp(struct bnxt_re_qp * qp,struct bnxt_re_pd * pd)2233acd884deSSumit Saxena static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd)
2234acd884deSSumit Saxena {
2235acd884deSSumit Saxena 	struct bnxt_qplib_qp *qplqp;
2236acd884deSSumit Saxena 	struct bnxt_qplib_res *res;
2237acd884deSSumit Saxena 	struct bnxt_re_dev *rdev;
2238acd884deSSumit Saxena 	u32 sstep, rstep;
2239acd884deSSumit Saxena 	u8 gsi_mode;
2240acd884deSSumit Saxena 	int rc = 0;
2241acd884deSSumit Saxena 
2242acd884deSSumit Saxena 	rdev = qp->rdev;
2243acd884deSSumit Saxena 	qplqp = &qp->qplib_qp;
2244acd884deSSumit Saxena 	res = &rdev->qplib_res;
2245acd884deSSumit Saxena 	gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
2246acd884deSSumit Saxena 
2247acd884deSSumit Saxena 	rstep = __get_rq_hdr_buf_size(gsi_mode);
2248acd884deSSumit Saxena 	sstep = __get_sq_hdr_buf_size(gsi_mode);
2249acd884deSSumit Saxena 	rc = bnxt_qplib_alloc_hdr_buf(res, qplqp, sstep, rstep);
2250acd884deSSumit Saxena 	if (rc)
2251acd884deSSumit Saxena 		goto out;
2252acd884deSSumit Saxena 
2253acd884deSSumit Saxena 	rc = bnxt_qplib_create_qp1(res, qplqp);
2254acd884deSSumit Saxena 	if (rc) {
2255acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "create HW QP1 failed!\n");
2256acd884deSSumit Saxena 		goto out;
2257acd884deSSumit Saxena 	}
2258acd884deSSumit Saxena 
2259acd884deSSumit Saxena 	if (gsi_mode == BNXT_RE_GSI_MODE_ALL)
2260acd884deSSumit Saxena 		rc = bnxt_re_create_shadow_gsi(qp, pd);
2261acd884deSSumit Saxena out:
2262acd884deSSumit Saxena 	return rc;
2263acd884deSSumit Saxena }
2264acd884deSSumit Saxena 
bnxt_re_test_qp_limits(struct bnxt_re_dev * rdev,struct ib_qp_init_attr * init_attr,struct bnxt_qplib_dev_attr * dev_attr)2265acd884deSSumit Saxena static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
2266acd884deSSumit Saxena 				   struct ib_qp_init_attr *init_attr,
2267acd884deSSumit Saxena 				   struct bnxt_qplib_dev_attr *dev_attr)
2268acd884deSSumit Saxena {
2269acd884deSSumit Saxena 	bool rc = true;
2270acd884deSSumit Saxena 	int ilsize;
2271acd884deSSumit Saxena 
2272acd884deSSumit Saxena 	ilsize = ALIGN(init_attr->cap.max_inline_data, sizeof(struct sq_sge));
2273acd884deSSumit Saxena 	if ((init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
2274acd884deSSumit Saxena 	    (init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
2275acd884deSSumit Saxena 	    (init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
2276acd884deSSumit Saxena 	    (init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
2277acd884deSSumit Saxena 	    (ilsize > dev_attr->max_inline_data)) {
2278acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded! "
2279acd884deSSumit Saxena 			"0x%x/0x%x 0x%x/0x%x 0x%x/0x%x "
2280acd884deSSumit Saxena 			"0x%x/0x%x 0x%x/0x%x\n",
2281acd884deSSumit Saxena 			init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
2282acd884deSSumit Saxena 			init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
2283acd884deSSumit Saxena 			init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
2284acd884deSSumit Saxena 			init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
2285acd884deSSumit Saxena 			init_attr->cap.max_inline_data,
2286acd884deSSumit Saxena 			dev_attr->max_inline_data);
2287acd884deSSumit Saxena 		rc = false;
2288acd884deSSumit Saxena 	}
2289acd884deSSumit Saxena 	return rc;
2290acd884deSSumit Saxena }
2291acd884deSSumit Saxena 
2292acd884deSSumit Saxena static inline struct
__get_qp_from_qp_in(struct ib_pd * qp_in,struct bnxt_re_dev * rdev)2293acd884deSSumit Saxena bnxt_re_qp *__get_qp_from_qp_in(struct ib_pd *qp_in,
2294acd884deSSumit Saxena 				struct bnxt_re_dev *rdev)
2295acd884deSSumit Saxena {
2296acd884deSSumit Saxena 	struct bnxt_re_qp *qp;
2297acd884deSSumit Saxena 
2298acd884deSSumit Saxena 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2299acd884deSSumit Saxena 	if (!qp)
2300acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Allocate QP failed!\n");
2301acd884deSSumit Saxena 	return qp;
2302acd884deSSumit Saxena }
2303acd884deSSumit Saxena 
/*
 * bnxt_re_create_qp - ib_device create_qp verb.
 * @qp_in:        protection domain the QP is created on.
 * @qp_init_attr: requested QP capabilities and type.
 * @udata:        user-space context; non-NULL for user QPs.
 *
 * Validates resource limits, allocates and initializes the QP, creates
 * the hardware QP (via the GSI path for QP1 on pre-Gen-P5 chips), and
 * registers the QP in the per-device list and counters.
 *
 * Returns the new ib_qp on success, or ERR_PTR(-errno) on failure.
 * Error unwinding is ordered: destroy HW QP -> release user memory ->
 * free the QP container.
 */
struct ib_qp *bnxt_re_create_qp(struct ib_pd *qp_in,
			       struct ib_qp_init_attr *qp_init_attr,
			       struct ib_udata *udata)
{
	struct bnxt_re_pd *pd;
	struct ib_pd *ib_pd = qp_in;
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_re_dev *rdev;
	u32 active_qps, tmp_qps;
	struct bnxt_re_qp *qp;
	int rc;

	pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
	rdev = pd->rdev;
	dev_attr = rdev->dev_attr;
	/* Refuse new QPs while the driver is unloading. */
	if (rdev->mod_exit) {
		rc = -EIO;
		dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
		goto exit;
	}

	/* Enforce the device-wide QP count limit. */
	if (atomic_read(&rdev->stats.rsors.qp_count) >= dev_attr->max_qp) {
		dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded(QPs Alloc'd %u of max %u)\n",
			atomic_read(&rdev->stats.rsors.qp_count), dev_attr->max_qp);
		rc = -EINVAL;
		goto exit;
	}

	/* Per-QP capability limits (WRs, SGEs, inline data). */
	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
	if (!rc) {
		rc = -EINVAL;
		goto exit;
	}
	qp = __get_qp_from_qp_in(qp_in, rdev);
	if (!qp) {
		rc = -ENOMEM;
		goto exit;
	}
	qp->rdev = rdev;

	/* Translate init attributes into the qplib QP; pins user memory. */
	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
	if (rc)
		goto fail;

	if (qp_init_attr->qp_type == IB_QPT_GSI &&
	    !_is_chip_gen_p5_p7(rdev->chip_ctx)) {
		/* Older chips need the special GSI (QP1) creation path. */
		rc = bnxt_re_create_gsi_qp(qp, pd);
		if (rc == -ENODEV)
			goto qp_destroy;
		if (rc)
			goto fail;
	} else {
		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "create HW QP failed!\n");
			goto free_umem;
		}

		if (udata) {
			struct bnxt_re_qp_resp resp;

			/* Report the HW QP id back to user space. */
			resp.qpid = qp->qplib_qp.id;
			rc = bnxt_re_copy_to_udata(rdev, &resp,
						   min(udata->outlen, sizeof(resp)),
						   udata);
			if (rc)
				goto qp_destroy;
		}
	}

	qp->ib_qp.qp_num = qp->qplib_qp.id;
	if (qp_init_attr->qp_type == IB_QPT_GSI)
		rdev->gsi_ctx.gsi_qp = qp;
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	mutex_unlock(&rdev->qp_lock);
	/* Track active and high-water-mark QP counts. */
	atomic_inc(&rdev->stats.rsors.qp_count);
	active_qps = atomic_read(&rdev->stats.rsors.qp_count);
	if (active_qps > atomic_read(&rdev->stats.rsors.max_qp_count))
		atomic_set(&rdev->stats.rsors.max_qp_count, active_qps);

	bnxt_re_dump_debug_stats(rdev, active_qps);

	/* Get the counters for RC QPs and UD QPs */
	if (qp_init_attr->qp_type == IB_QPT_RC) {
		tmp_qps = atomic_inc_return(&rdev->stats.rsors.rc_qp_count);
		if (tmp_qps > atomic_read(&rdev->stats.rsors.max_rc_qp_count))
			atomic_set(&rdev->stats.rsors.max_rc_qp_count, tmp_qps);
	} else if (qp_init_attr->qp_type == IB_QPT_UD) {
		tmp_qps = atomic_inc_return(&rdev->stats.rsors.ud_qp_count);
		if (tmp_qps > atomic_read(&rdev->stats.rsors.max_ud_qp_count))
			atomic_set(&rdev->stats.rsors.max_ud_qp_count, tmp_qps);
	}

	return &qp->ib_qp;

qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
	/* Only user QPs have pinned user memory to release. */
	if (udata) {
		if (qp->rumem && !IS_ERR(qp->rumem))
			ib_umem_release(qp->rumem);
		if (qp->sumem && !IS_ERR(qp->sumem))
			ib_umem_release(qp->sumem);
	}
fail:
	kfree(qp);
exit:
	return ERR_PTR(rc);
}
2417acd884deSSumit Saxena 
bnxt_re_modify_shadow_qp(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp1_qp,int qp_attr_mask)2418acd884deSSumit Saxena static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
2419acd884deSSumit Saxena 			     struct bnxt_re_qp *qp1_qp,
2420acd884deSSumit Saxena 			     int qp_attr_mask)
2421acd884deSSumit Saxena {
2422acd884deSSumit Saxena 	struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
2423acd884deSSumit Saxena 	int rc = 0;
2424acd884deSSumit Saxena 
2425acd884deSSumit Saxena 	if (qp_attr_mask & IB_QP_STATE) {
2426acd884deSSumit Saxena 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2427acd884deSSumit Saxena 		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
2428acd884deSSumit Saxena 	}
2429acd884deSSumit Saxena 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2430acd884deSSumit Saxena 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2431acd884deSSumit Saxena 		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
2432acd884deSSumit Saxena 	}
2433acd884deSSumit Saxena 
2434acd884deSSumit Saxena 	if (qp_attr_mask & IB_QP_QKEY) {
2435acd884deSSumit Saxena 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2436acd884deSSumit Saxena 		/* Using a Random  QKEY */
2437acd884deSSumit Saxena 		qp->qplib_qp.qkey = BNXT_RE_QP_RANDOM_QKEY;
2438acd884deSSumit Saxena 	}
2439acd884deSSumit Saxena 	if (qp_attr_mask & IB_QP_SQ_PSN) {
2440acd884deSSumit Saxena 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2441acd884deSSumit Saxena 		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
2442acd884deSSumit Saxena 	}
2443acd884deSSumit Saxena 
2444acd884deSSumit Saxena 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2445acd884deSSumit Saxena 	if (rc)
2446acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Modify Shadow QP for QP1 failed\n");
2447acd884deSSumit Saxena 	return rc;
2448acd884deSSumit Saxena }
2449acd884deSSumit Saxena 
ipv4_from_gid(u8 * gid)2450acd884deSSumit Saxena static u32 ipv4_from_gid(u8 *gid)
2451acd884deSSumit Saxena {
2452acd884deSSumit Saxena 	return (gid[15] << 24 | gid[14] << 16 | gid[13] << 8 | gid[12]);
2453acd884deSSumit Saxena }
2454acd884deSSumit Saxena 
/*
 * get_source_port - derive a 16-bit flow identifier for a RoCEv2 RC QP.
 * @rdev: owning device (unused in the computation itself).
 * @qp:   QP whose address/QPN tuple identifies the flow.
 *
 * Computes a CRC16 over dmac | smac | dest-IP | src-IP | dest-QPN so
 * that distinct connections hash to distinct values.  The caller stores
 * the result in qp_info_entry.s_port — presumably as the flow's UDP
 * source port for RoCEv2; confirm against the consumers of
 * qp_info_entry (NOTE(review)).
 */
static u16 get_source_port(struct bnxt_re_dev *rdev,
			   struct bnxt_re_qp *qp)
{
	u8 ip_off, data[48], smac[ETH_ALEN];
	u16 crc = 0, buf_len = 0, i;
	u8 addr_len;
	u32 qpn;

	/*
	 * Pick the offset and length of the address bytes inside the
	 * 16-byte GID: IPv6 uses 6 bytes at offset 10, IPv4 (v4-mapped
	 * GID) uses the last 4 bytes at offset 12.
	 */
	if (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6) {
		addr_len = 6;
		ip_off = 10;
	} else {
		addr_len = 4;
		ip_off = 12;
	}

	memcpy(smac, qp->qplib_qp.smac, ETH_ALEN);

	/* Assemble dmac | smac | dst addr | src addr | dest QPN. */
	memset(data, 0, 48);
	memcpy(data, qp->qplib_qp.ah.dmac, ETH_ALEN);
	buf_len += ETH_ALEN;

	memcpy(data + buf_len, smac, ETH_ALEN);
	buf_len += ETH_ALEN;

	memcpy(data + buf_len, qp->qplib_qp.ah.dgid.data + ip_off, addr_len);
	buf_len += addr_len;

	memcpy(data + buf_len, qp->qp_info_entry.sgid.raw + ip_off, addr_len);
	buf_len += addr_len;

	/* Append the low 24 bits of the destination QPN, big-endian. */
	qpn = htonl(qp->qplib_qp.dest_qpn);
	memcpy(data + buf_len, (u8 *)&qpn + 1, 3);
	buf_len += 3;

	for (i = 0; i < buf_len; i++)
		crc = crc16(crc, (data + i), 1);

	return crc;
}
2495acd884deSSumit Saxena 
/*
 * bnxt_re_update_qp_info - refresh the QP's qp_info_entry with the
 * current source/destination addresses and ports after a modify-QP.
 * @rdev: owning device.
 * @qp:   QP whose cached flow information is updated.
 *
 * The source GID was captured into qp_info_entry.sgid during the
 * IB_QP_AV handling of bnxt_re_modify_qp.
 */
static void bnxt_re_update_qp_info(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
{
	u16 type;

	type = __from_hw_to_ib_qp_type(qp->qplib_qp.type);

	/* User-space can extract ip address with sgid_index. */
	if (ipv6_addr_v4mapped((struct in6_addr *)&qp->qplib_qp.ah.dgid)) {
		/* V4-mapped destination GID: record IPv4 endpoints. */
		qp->qp_info_entry.s_ip.ipv4_addr = ipv4_from_gid(qp->qp_info_entry.sgid.raw);
		qp->qp_info_entry.d_ip.ipv4_addr = ipv4_from_gid(qp->qplib_qp.ah.dgid.data);
	} else {
		/* Otherwise store the full 128-bit IPv6 addresses. */
		memcpy(&qp->qp_info_entry.s_ip.ipv6_addr, qp->qp_info_entry.sgid.raw,
		       sizeof(qp->qp_info_entry.s_ip.ipv6_addr));
		memcpy(&qp->qp_info_entry.d_ip.ipv6_addr, qp->qplib_qp.ah.dgid.data,
		       sizeof(qp->qp_info_entry.d_ip.ipv6_addr));
	}

	/* A per-flow source port only applies to RoCEv2 RC connections. */
	if (type == IB_QPT_RC &&
	    (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 ||
	     qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6)) {
		qp->qp_info_entry.s_port = get_source_port(rdev, qp);
	}
	qp->qp_info_entry.d_port = BNXT_RE_QP_DEST_PORT;
}
2520acd884deSSumit Saxena 
/*
 * bnxt_qplib_manage_flush_qp - add or remove the QP from the CQ flush
 * lists based on its new state.
 * @qp: QP that just completed a state transition.
 *
 * Kernel QPs only: user QPs (qp->sumem set) flush their own queues.
 * On transition to ERR the QP joins the flush lists and any CQ with
 * outstanding work is kicked so consumers see flushed completions; on
 * transition to RESET the QP is cleaned out of the flush lists.
 * Both operations are performed under the CQ locks taken by
 * bnxt_re_lock_cqs() to serialize against poll/arm paths.
 */
static void bnxt_qplib_manage_flush_qp(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_q *rq, *sq;
	unsigned long flags;

	/* User QPs manage their own queue flushing. */
	if (qp->sumem)
		return;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		rq = &qp->qplib_qp.rq;
		sq = &qp->qplib_qp.sq;

		dev_dbg(rdev_to_dev(qp->rdev),
			"Move QP = %p to flush list\n", qp);
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);

		/* Notify the send CQ if the SQ has unconsumed entries. */
		if (sq->hwq.prod != sq->hwq.cons)
			bnxt_re_handle_cqn(&qp->scq->qplib_cq);

		/* Likewise for a distinct receive CQ with pending RQ work. */
		if (qp->rcq && (qp->rcq != qp->scq) &&
		    (rq->hwq.prod != rq->hwq.cons))
			bnxt_re_handle_cqn(&qp->rcq->qplib_cq);
	}

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_dbg(rdev_to_dev(qp->rdev),
			"Move QP = %p out of flush list\n", qp);
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_clean_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}
}
2555acd884deSSumit Saxena 
ib_modify_qp_is_ok_compat(enum ib_qp_state cur_state,enum ib_qp_state next_state,enum ib_qp_type type,enum ib_qp_attr_mask mask)2556acd884deSSumit Saxena bool ib_modify_qp_is_ok_compat(enum ib_qp_state cur_state,
2557acd884deSSumit Saxena 			       enum ib_qp_state next_state,
2558acd884deSSumit Saxena 			       enum ib_qp_type type,
2559acd884deSSumit Saxena 			       enum ib_qp_attr_mask mask)
2560acd884deSSumit Saxena {
2561acd884deSSumit Saxena 		return (ib_modify_qp_is_ok(cur_state, next_state,
2562acd884deSSumit Saxena 					   type, mask));
2563acd884deSSumit Saxena }
2564acd884deSSumit Saxena 
/*
 * bnxt_re_modify_qp - ib_device modify_qp verb.
 * @ib_qp:        QP being modified.
 * @qp_attr:      new attribute values.
 * @qp_attr_mask: IB_QP_* mask selecting which attributes apply.
 * @udata:        user-space context; carries the extended modify
 *                request/response for user QPs.
 *
 * Translates the ibcore attribute set into qplib modify flags, issues
 * the firmware modify command, then performs post-modify bookkeeping
 * (flush-list management, user-space response, shadow-GSI propagation,
 * flow-info refresh).
 *
 * Returns 0 on success or a negative errno.
 */
int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata)
{
	enum ib_qp_state curr_qp_state, new_qp_state;
	struct bnxt_re_modify_qp_ex_resp resp = {};
	struct bnxt_re_modify_qp_ex_req ureq = {};
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_ppp *ppp = NULL;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_qp *qp;
	struct ib_gid_attr *sgid_attr;
	struct ib_gid_attr gid_attr;
	union ib_gid sgid, *gid_ptr = NULL;
	u8 nw_type;
	int rc, entries, status;
	bool is_copy_to_udata = false;
	bool is_qpmtu_high = false;

	qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
	rdev = qp->rdev;
	dev_attr = rdev->dev_attr;

	qp->qplib_qp.modify_flags = 0;
	ppp = &qp->qplib_qp.ppp;
	if (qp_attr_mask & IB_QP_STATE) {
		/* Validate the state transition against the IB spec table. */
		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
		new_qp_state = qp_attr->qp_state;
		if (!ib_modify_qp_is_ok_compat(curr_qp_state, new_qp_state,
					       ib_qp->qp_type, qp_attr_mask)) {
			dev_err(rdev_to_dev(rdev),"invalid attribute mask=0x%x"
				" specified for qpn=0x%x of type=0x%x"
				" current_qp_state=0x%x, new_qp_state=0x%x\n",
				qp_attr_mask, ib_qp->qp_num, ib_qp->qp_type,
				curr_qp_state, new_qp_state);
			return -EINVAL;
		}
		dev_dbg(rdev_to_dev(rdev), "%s:%d INFO attribute mask=0x%x qpn=0x%x "
			"of type=0x%x current_qp_state=0x%x, new_qp_state=0x%x\n",
			__func__, __LINE__, qp_attr_mask, ib_qp->qp_num,
			ib_qp->qp_type, curr_qp_state, new_qp_state);
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);

		/*
		 * On the RESET->INIT transition a user QP may request
		 * ping-pong push (PPP) via the extended modify request.
		 */
		if (udata && curr_qp_state == IB_QPS_RESET &&
		    new_qp_state == IB_QPS_INIT) {
			if (!ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				if (ureq.comp_mask &
				    BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK) {
					ppp->req = BNXT_QPLIB_PPP_REQ;
					ppp->dpi = ureq.dpi;
				}
			}
		}
	}
	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
		qp->qplib_qp.en_sqd_async_notify = true;
	}
	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
		qp->qplib_qp.access =
			__from_ib_access_flags(qp_attr->qp_access_flags);
		/* LOCAL_WRITE access must be set to allow RC receive */
		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
		/*
		 * NOTE(review): remote write/read are force-enabled here
		 * regardless of the caller's requested access flags —
		 * presumably a hardware requirement; confirm.
		 */
		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
	}
	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		qp->qplib_qp.qkey = qp_attr->qkey;
	}
	if (qp_attr_mask & IB_QP_AV) {
		/* Address vector: GID, MAC, and network-type resolution. */
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		memcpy(qp->qplib_qp.ah.dgid.data, qp_attr->ah_attr.grh.dgid.raw,
		       sizeof(qp->qplib_qp.ah.dgid.data));
		qp->qplib_qp.ah.flow_label = qp_attr->ah_attr.grh.flow_label;
		qp->qplib_qp.ah.sgid_index = _get_sgid_index(rdev,
						qp_attr->ah_attr.grh.sgid_index);
		qp->qplib_qp.ah.host_sgid_index = qp_attr->ah_attr.grh.sgid_index;
		qp->qplib_qp.ah.hop_limit = qp_attr->ah_attr.grh.hop_limit;
		qp->qplib_qp.ah.traffic_class =
					qp_attr->ah_attr.grh.traffic_class;
		qp->qplib_qp.ah.sl = qp_attr->ah_attr.sl;
		ether_addr_copy(qp->qplib_qp.ah.dmac, ROCE_DMAC(&qp_attr->ah_attr));
		sgid_attr = &gid_attr;
		status = bnxt_re_get_cached_gid(&rdev->ibdev, 1,
						qp_attr->ah_attr.grh.sgid_index,
						&sgid, &sgid_attr,
						&qp_attr->ah_attr.grh, NULL);
		/* Drop the ifnet reference the GID lookup took. */
		if (!status)
			if_rele(sgid_attr->ndev);
		gid_ptr = &sgid;
		/*
		 * NOTE(review): gid_attr is a stack variable; if the
		 * lookup above failed, sgid_attr->ndev is read
		 * uninitialized here — verify bnxt_re_get_cached_gid's
		 * failure contract.
		 */
		if (sgid_attr->ndev) {
			memcpy(qp->qplib_qp.smac, rdev->dev_addr,
			       ETH_ALEN);
			nw_type = bnxt_re_gid_to_network_type(sgid_attr, &sgid);
			dev_dbg(rdev_to_dev(rdev),
				 "Connection using the nw_type %d\n", nw_type);
			switch (nw_type) {
			case RDMA_NETWORK_IPV4:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
				break;
			case RDMA_NETWORK_IPV6:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
				break;
			default:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
				break;
			}
		}
		/* Cache the source GID for bnxt_re_update_qp_info(). */
		memcpy(&qp->qp_info_entry.sgid, gid_ptr, sizeof(qp->qp_info_entry.sgid));
	}

	/* MTU settings allowed only during INIT -> RTR */
	if (qp_attr->qp_state == IB_QPS_RTR) {
		bnxt_re_init_qpmtu(qp, if_getmtu(rdev->netdev), qp_attr_mask, qp_attr,
				   &is_qpmtu_high);
		if (udata && !ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			if (ureq.comp_mask & BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK) {
				/* User space asked for the chosen path MTU back. */
				resp.comp_mask |= BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK;
				resp.path_mtu = qp->qplib_qp.mtu;
				is_copy_to_udata = true;
			} else if (is_qpmtu_high) {
				dev_err(rdev_to_dev(rdev), "qp %#x invalid mtu\n",
					qp->qplib_qp.id);
				return -EINVAL;
			}
		}
	}

	if (qp_attr_mask & IB_QP_TIMEOUT) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
		qp->qplib_qp.timeout = qp_attr->timeout;
	}
	if (qp_attr_mask & IB_QP_RETRY_CNT) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
	}
	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
	}
	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
	}
	if (qp_attr_mask & IB_QP_RQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
		/* Cap the max_rd_atomic to device max */
		if (qp_attr->max_rd_atomic > dev_attr->max_qp_rd_atom)
			dev_dbg(rdev_to_dev(rdev),
				"max_rd_atomic requested %d is > device max %d\n",
				qp_attr->max_rd_atomic,
				dev_attr->max_qp_rd_atom);
		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
						   dev_attr->max_qp_rd_atom);
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		/* Unlike max_rd_atomic above, this one is a hard error. */
		if (qp_attr->max_dest_rd_atomic >
		    dev_attr->max_qp_init_rd_atom) {
			dev_err(rdev_to_dev(rdev),
				"max_dest_rd_atomic requested %d is > device max %d\n",
				qp_attr->max_dest_rd_atomic,
				dev_attr->max_qp_init_rd_atom);
			return -EINVAL;
		}
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
	}
	if (qp_attr_mask & IB_QP_CAP) {
		/* Resize the SQ/RQ within device limits. */
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_inline_data >=
						dev_attr->max_inline_data)) {
			dev_err(rdev_to_dev(rdev),
				"Create QP failed - max exceeded\n");
			return -EINVAL;
		}
		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
		if (entries > dev_attr->max_qp_wqes)
			entries = dev_attr->max_qp_wqes;
		/* NOTE(review): redundant with the clamp just above. */
		entries = min_t(u32, entries, dev_attr->max_qp_wqes);
		qp->qplib_qp.sq.max_wqe = entries;
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_attr->cap.max_send_wr;
		/*
		 * Reserving one slot for Phantom WQE. Some application can
		 * post one extra entry in this case. Allowing this to avoid
		 * unexpected Queue full condition
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;
		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
		if (qp->qplib_qp.rq.max_wqe) {
			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
			if (entries > dev_attr->max_qp_wqes)
				entries = dev_attr->max_qp_wqes;
			qp->qplib_qp.rq.max_wqe = entries;
			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						       qp_attr->cap.max_recv_wr;
			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
		} else {
			/* SRQ was used prior, just ignore the RQ caps */
		}
	}
	if (qp_attr_mask & IB_QP_DEST_QPN) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
	}

	/* Issue the firmware modify-QP command. */
	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Modify HW QP failed!\n");
		return rc;
	}
	/* Move the QP on/off the flush lists for ERR/RESET transitions. */
	if (qp_attr_mask & IB_QP_STATE)
		bnxt_qplib_manage_flush_qp(qp);
	/* Report back the PPP state index if PPP was requested and granted. */
	if (ureq.comp_mask & BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK &&
	    ppp->st_idx_en & CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED) {
		resp.comp_mask |= BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN;
		resp.ppp_st_idx = ppp->st_idx_en >>
				  BNXT_QPLIB_PPP_ST_IDX_SHIFT;
		is_copy_to_udata = true;
	}

	if (is_copy_to_udata) {
		rc = bnxt_re_copy_to_udata(rdev, &resp,
					   min(udata->outlen, sizeof(resp)),
					   udata);
		if (rc)
			return rc;
	}

	/* Mirror the change onto the shadow GSI QP where one exists. */
	if (ib_qp->qp_type == IB_QPT_GSI &&
	    rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL &&
	    rdev->gsi_ctx.gsi_sqp)
		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
	/*
	 * Update info when qp_info_info
	 */
	bnxt_re_update_qp_info(rdev, qp);
	return rc;
}
2843acd884deSSumit Saxena 
bnxt_re_query_qp(struct ib_qp * ib_qp,struct ib_qp_attr * qp_attr,int qp_attr_mask,struct ib_qp_init_attr * qp_init_attr)2844acd884deSSumit Saxena int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2845acd884deSSumit Saxena 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2846acd884deSSumit Saxena {
2847acd884deSSumit Saxena 	struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
2848acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = qp->rdev;
2849acd884deSSumit Saxena 	struct bnxt_qplib_qp *qplib_qp;
2850acd884deSSumit Saxena 	int rc;
2851acd884deSSumit Saxena 
2852acd884deSSumit Saxena 	qplib_qp = kcalloc(1, sizeof(*qplib_qp), GFP_KERNEL);
2853acd884deSSumit Saxena 	if (!qplib_qp)
2854acd884deSSumit Saxena 		return -ENOMEM;
2855acd884deSSumit Saxena 
2856acd884deSSumit Saxena 	qplib_qp->id = qp->qplib_qp.id;
2857acd884deSSumit Saxena 	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2858acd884deSSumit Saxena 
2859acd884deSSumit Saxena 	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2860acd884deSSumit Saxena 	if (rc) {
2861acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Query HW QP (0x%x) failed! rc = %d\n",
2862acd884deSSumit Saxena 			qplib_qp->id, rc);
2863acd884deSSumit Saxena 		goto free_mem;
2864acd884deSSumit Saxena 	}
2865acd884deSSumit Saxena 	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2866acd884deSSumit Saxena 	qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2867acd884deSSumit Saxena 	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2868acd884deSSumit Saxena 	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2869acd884deSSumit Saxena 	qp_attr->pkey_index = qplib_qp->pkey_index;
2870acd884deSSumit Saxena 	qp_attr->qkey = qplib_qp->qkey;
2871acd884deSSumit Saxena 	memcpy(qp_attr->ah_attr.grh.dgid.raw, qplib_qp->ah.dgid.data,
2872acd884deSSumit Saxena 	       sizeof(qplib_qp->ah.dgid.data));
2873acd884deSSumit Saxena 	qp_attr->ah_attr.grh.flow_label = qplib_qp->ah.flow_label;
2874acd884deSSumit Saxena 	qp_attr->ah_attr.grh.sgid_index = qplib_qp->ah.host_sgid_index;
2875acd884deSSumit Saxena 	qp_attr->ah_attr.grh.hop_limit = qplib_qp->ah.hop_limit;
2876acd884deSSumit Saxena 	qp_attr->ah_attr.grh.traffic_class = qplib_qp->ah.traffic_class;
2877acd884deSSumit Saxena 	qp_attr->ah_attr.sl = qplib_qp->ah.sl;
2878acd884deSSumit Saxena 	ether_addr_copy(ROCE_DMAC(&qp_attr->ah_attr), qplib_qp->ah.dmac);
2879acd884deSSumit Saxena 	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2880acd884deSSumit Saxena 	qp_attr->timeout = qplib_qp->timeout;
2881acd884deSSumit Saxena 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
2882acd884deSSumit Saxena 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
2883acd884deSSumit Saxena 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2884acd884deSSumit Saxena 	qp_attr->rq_psn = qplib_qp->rq.psn;
2885acd884deSSumit Saxena 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2886acd884deSSumit Saxena 	qp_attr->sq_psn = qplib_qp->sq.psn;
2887acd884deSSumit Saxena 	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2888acd884deSSumit Saxena 	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2889acd884deSSumit Saxena 							IB_SIGNAL_REQ_WR;
2890acd884deSSumit Saxena 	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2891acd884deSSumit Saxena 
2892acd884deSSumit Saxena 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2893acd884deSSumit Saxena 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2894acd884deSSumit Saxena 	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2895acd884deSSumit Saxena 	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2896acd884deSSumit Saxena 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2897acd884deSSumit Saxena 	qp_init_attr->cap = qp_attr->cap;
2898acd884deSSumit Saxena 
2899acd884deSSumit Saxena free_mem:
2900acd884deSSumit Saxena 	kfree(qplib_qp);
2901acd884deSSumit Saxena 	return rc;
2902acd884deSSumit Saxena }
2903acd884deSSumit Saxena 
2904acd884deSSumit Saxena /* Builders */
2905acd884deSSumit Saxena 
2906acd884deSSumit Saxena /* For Raw, the application is responsible to build the entire packet */
bnxt_re_build_raw_send(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)2907acd884deSSumit Saxena static void bnxt_re_build_raw_send(const struct ib_send_wr *wr,
2908acd884deSSumit Saxena 				   struct bnxt_qplib_swqe *wqe)
2909acd884deSSumit Saxena {
2910acd884deSSumit Saxena 	switch (wr->send_flags) {
2911acd884deSSumit Saxena 	case IB_SEND_IP_CSUM:
2912acd884deSSumit Saxena 		wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2913acd884deSSumit Saxena 		break;
2914acd884deSSumit Saxena 	default:
2915acd884deSSumit Saxena 		/* Pad HW RoCE iCRC */
2916acd884deSSumit Saxena 		wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2917acd884deSSumit Saxena 		break;
2918acd884deSSumit Saxena 	}
2919acd884deSSumit Saxena }
2920acd884deSSumit Saxena 
2921acd884deSSumit Saxena /* For QP1, the driver must build the entire RoCE (v1/v2) packet hdr
2922acd884deSSumit Saxena  * as according to the sgid and AV
2923acd884deSSumit Saxena  */
bnxt_re_build_qp1_send(struct bnxt_re_qp * qp,const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe,int payload_size)2924acd884deSSumit Saxena static int bnxt_re_build_qp1_send(struct bnxt_re_qp *qp, const struct ib_send_wr *wr,
2925acd884deSSumit Saxena 				  struct bnxt_qplib_swqe *wqe, int payload_size)
2926acd884deSSumit Saxena {
2927acd884deSSumit Saxena 	struct bnxt_re_ah *ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah,
2928acd884deSSumit Saxena 					   ibah);
2929acd884deSSumit Saxena 	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2930acd884deSSumit Saxena 	struct bnxt_qplib_sge sge;
2931acd884deSSumit Saxena 	int i, rc = 0;
2932acd884deSSumit Saxena 	union ib_gid sgid;
2933acd884deSSumit Saxena 	u16 vlan_id;
2934acd884deSSumit Saxena 	u8 *ptmac;
2935acd884deSSumit Saxena 	void *buf;
2936acd884deSSumit Saxena 
2937acd884deSSumit Saxena 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2938acd884deSSumit Saxena 
2939acd884deSSumit Saxena 	/* Get sgid */
2940acd884deSSumit Saxena 	rc = bnxt_re_query_gid(&qp->rdev->ibdev, 1, qplib_ah->sgid_index, &sgid);
2941acd884deSSumit Saxena 	if (rc)
2942acd884deSSumit Saxena 		return rc;
2943acd884deSSumit Saxena 
2944acd884deSSumit Saxena 	/* ETH */
2945acd884deSSumit Saxena 	qp->qp1_hdr.eth_present = 1;
2946acd884deSSumit Saxena 	ptmac = ah->qplib_ah.dmac;
2947acd884deSSumit Saxena 	memcpy(qp->qp1_hdr.eth.dmac_h, ptmac, 4);
2948acd884deSSumit Saxena 	ptmac += 4;
2949acd884deSSumit Saxena 	memcpy(qp->qp1_hdr.eth.dmac_l, ptmac, 2);
2950acd884deSSumit Saxena 
2951acd884deSSumit Saxena 	ptmac = qp->qplib_qp.smac;
2952acd884deSSumit Saxena 	memcpy(qp->qp1_hdr.eth.smac_h, ptmac, 2);
2953acd884deSSumit Saxena 	ptmac += 2;
2954acd884deSSumit Saxena 	memcpy(qp->qp1_hdr.eth.smac_l, ptmac, 4);
2955acd884deSSumit Saxena 
2956acd884deSSumit Saxena 	qp->qp1_hdr.eth.type = cpu_to_be16(BNXT_QPLIB_ETHTYPE_ROCEV1);
2957acd884deSSumit Saxena 
2958acd884deSSumit Saxena 	/* For vlan, check the sgid for vlan existence */
2959acd884deSSumit Saxena 	vlan_id = rdma_get_vlan_id(&sgid);
2960acd884deSSumit Saxena 	if (vlan_id && vlan_id < 0x1000) {
2961acd884deSSumit Saxena 		qp->qp1_hdr.vlan_present = 1;
2962acd884deSSumit Saxena 		qp->qp1_hdr.eth.type = cpu_to_be16(ETH_P_8021Q);
2963acd884deSSumit Saxena 	}
2964acd884deSSumit Saxena 	/* GRH */
2965acd884deSSumit Saxena 	qp->qp1_hdr.grh_present = 1;
2966acd884deSSumit Saxena 	qp->qp1_hdr.grh.ip_version = 6;
2967acd884deSSumit Saxena 	qp->qp1_hdr.grh.payload_length =
2968acd884deSSumit Saxena 		cpu_to_be16((IB_BTH_BYTES + IB_DETH_BYTES + payload_size + 7)
2969acd884deSSumit Saxena 			    & ~3);
2970acd884deSSumit Saxena 	qp->qp1_hdr.grh.next_header = 0x1b;
2971acd884deSSumit Saxena 	memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
2972acd884deSSumit Saxena 	memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2973acd884deSSumit Saxena 	       sizeof(sgid));
2974acd884deSSumit Saxena 
2975acd884deSSumit Saxena 	/* BTH */
2976acd884deSSumit Saxena 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2977acd884deSSumit Saxena 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2978acd884deSSumit Saxena 		qp->qp1_hdr.immediate_present = 1;
2979acd884deSSumit Saxena 	} else {
2980acd884deSSumit Saxena 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2981acd884deSSumit Saxena 	}
2982acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_SOLICITED)
2983acd884deSSumit Saxena 		qp->qp1_hdr.bth.solicited_event = 1;
2984acd884deSSumit Saxena 	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2985acd884deSSumit Saxena 	/* P_key for QP1 is for all members */
2986acd884deSSumit Saxena 	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2987acd884deSSumit Saxena 	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2988acd884deSSumit Saxena 	qp->qp1_hdr.bth.ack_req = 0;
2989acd884deSSumit Saxena 	qp->send_psn++;
2990acd884deSSumit Saxena 	qp->send_psn &= BTH_PSN_MASK;
2991acd884deSSumit Saxena 	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2992acd884deSSumit Saxena 	/* DETH */
2993acd884deSSumit Saxena 	/* Use the priviledged Q_Key for QP1 */
2994acd884deSSumit Saxena 	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2995acd884deSSumit Saxena 	qp->qp1_hdr.deth.source_qpn = IB_QP1;
2996acd884deSSumit Saxena 
2997acd884deSSumit Saxena 	/* Pack the QP1 to the transmit buffer */
2998acd884deSSumit Saxena 	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2999acd884deSSumit Saxena 	if (!buf) {
3000acd884deSSumit Saxena 		dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n");
3001acd884deSSumit Saxena 		rc = -ENOMEM;
3002acd884deSSumit Saxena 	}
3003acd884deSSumit Saxena 	for (i = wqe->num_sge; i; i--) {
3004acd884deSSumit Saxena 		wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
3005acd884deSSumit Saxena 		wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
3006acd884deSSumit Saxena 		wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
3007acd884deSSumit Saxena 	}
3008acd884deSSumit Saxena 	wqe->sg_list[0].addr = sge.addr;
3009acd884deSSumit Saxena 	wqe->sg_list[0].lkey = sge.lkey;
3010acd884deSSumit Saxena 	wqe->sg_list[0].size = sge.size;
3011acd884deSSumit Saxena 	wqe->num_sge++;
3012acd884deSSumit Saxena 
3013acd884deSSumit Saxena 	return rc;
3014acd884deSSumit Saxena }
3015acd884deSSumit Saxena 
bnxt_re_build_gsi_send(struct bnxt_re_qp * qp,const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3016acd884deSSumit Saxena static int bnxt_re_build_gsi_send(struct bnxt_re_qp *qp,
3017acd884deSSumit Saxena 				  const struct ib_send_wr *wr,
3018acd884deSSumit Saxena 				  struct bnxt_qplib_swqe *wqe)
3019acd884deSSumit Saxena {
3020acd884deSSumit Saxena 	struct bnxt_re_dev *rdev;
3021acd884deSSumit Saxena 	int rc, indx, len = 0;
3022acd884deSSumit Saxena 
3023acd884deSSumit Saxena 	rdev = qp->rdev;
3024acd884deSSumit Saxena 
3025acd884deSSumit Saxena 	/* Mode UD is applicable to Gen P5 only */
3026acd884deSSumit Saxena 	if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)
3027acd884deSSumit Saxena 		return 0;
3028acd884deSSumit Saxena 
3029acd884deSSumit Saxena 	for (indx = 0; indx < wr->num_sge; indx++) {
3030acd884deSSumit Saxena 		wqe->sg_list[indx].addr = wr->sg_list[indx].addr;
3031acd884deSSumit Saxena 		wqe->sg_list[indx].lkey = wr->sg_list[indx].lkey;
3032acd884deSSumit Saxena 		wqe->sg_list[indx].size = wr->sg_list[indx].length;
3033acd884deSSumit Saxena 		len += wr->sg_list[indx].length;
3034acd884deSSumit Saxena 	}
3035acd884deSSumit Saxena 	rc = bnxt_re_build_qp1_send(qp, wr, wqe, len);
3036acd884deSSumit Saxena 	wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
3037acd884deSSumit Saxena 
3038acd884deSSumit Saxena 	return rc;
3039acd884deSSumit Saxena }
3040acd884deSSumit Saxena 
3041acd884deSSumit Saxena /* For the MAD layer, it only provides the recv SGE the size of
3042acd884deSSumit Saxena    ib_grh + MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH,
3043acd884deSSumit Saxena    nor RoCE iCRC.  The Cu+ solution must provide buffer for the entire
3044acd884deSSumit Saxena    receive packet (334 bytes) with no VLAN and then copy the GRH
3045acd884deSSumit Saxena    and the MAD datagram out to the provided SGE.
3046acd884deSSumit Saxena */
3047acd884deSSumit Saxena 
/* Build a QP1 (GSI) receive WQE by splitting the driver's header buffer
 * and the ULP's single SGE into 5 SGEs so the HW lands each packet
 * section in the right place (see the layout comment below).  Returns 0
 * on success or -ENOMEM if either buffer is too small.
 */
static int bnxt_re_build_qp1_recv(struct bnxt_re_qp *qp,
				  const struct ib_recv_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_sge ref, sge;	/* ref: ULP buffer, sge: driver buffer */
	u8 udp_hdr_size = 0;
	u8 ip_hdr_size = 0;
	int rc = 0;
	int size;			/* bytes remaining in the driver buffer */

	if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) {
		/* Create 5 SGEs as according to the following:
		 * Ethernet header (14)
		 * ib_grh (40) - as provided from the wr
		 * ib_bth + ib_deth + UDP(RoCE v2 only)  (28)
		 * MAD (256) - as provided from the wr
		 * iCRC (4)
		 */

		/* Set RoCE v2 header size and offsets */
		if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ROCE_V2_IPV4)
			ip_hdr_size = 20;
		if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_ROCE_V1)
			udp_hdr_size = 8;

		/* Save the reference from ULP */
		ref.addr = wr->sg_list[0].addr;
		ref.lkey = wr->sg_list[0].lkey;
		ref.size = wr->sg_list[0].length;

		/* SGE 1: ethernet header into the driver buffer */
		size = sge.size;
		wqe->sg_list[0].addr = sge.addr;
		wqe->sg_list[0].lkey = sge.lkey;
		wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE;
		size -= wqe->sg_list[0].size;
		if (size <= 0) {
			dev_err(rdev_to_dev(qp->rdev),"QP1 rq buffer is empty!\n");
			rc = -ENOMEM;
			goto done;
		}
		sge.size = (u32)size;
		sge.addr += wqe->sg_list[0].size;

		/* SGE 2: GRH (or IPv6-sized remainder) into the ULP buffer */
		/* In case of RoCE v2 ipv4 lower 20 bytes should have IP hdr */
		wqe->sg_list[1].addr = ref.addr + ip_hdr_size;
		wqe->sg_list[1].lkey = ref.lkey;
		wqe->sg_list[1].size = sizeof(struct ib_grh) - ip_hdr_size;
		ref.size -= wqe->sg_list[1].size;
		if (ref.size <= 0) {
			dev_err(rdev_to_dev(qp->rdev),
				"QP1 ref buffer is empty!\n");
			rc = -ENOMEM;
			goto done;
		}
		/* Skip past the GRH area; ip_hdr_size bytes were consumed
		 * below the GRH start for the RoCE v2 IPv4 header.
		 */
		ref.addr += wqe->sg_list[1].size + ip_hdr_size;

		/* SGE 3: BTH + DETH (+ UDP for RoCE v2) into the driver buffer */
		wqe->sg_list[2].addr = sge.addr;
		wqe->sg_list[2].lkey = sge.lkey;
		wqe->sg_list[2].size = BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE +
				       udp_hdr_size;
		size -= wqe->sg_list[2].size;
		if (size <= 0) {
			dev_err(rdev_to_dev(qp->rdev),
				"QP1 rq buffer is empty!\n");
			rc = -ENOMEM;
			goto done;
		}
		sge.size = (u32)size;
		sge.addr += wqe->sg_list[2].size;

		/* SGE 4: the MAD payload - remainder of the ULP buffer */
		wqe->sg_list[3].addr = ref.addr;
		wqe->sg_list[3].lkey = ref.lkey;
		wqe->sg_list[3].size = ref.size;
		ref.size -= wqe->sg_list[3].size;
		if (ref.size) {
			dev_err(rdev_to_dev(qp->rdev),
				"QP1 ref buffer is incorrect!\n");
			rc = -ENOMEM;
			goto done;
		}
		/* SGE 5: iCRC and any trailer - remainder of the driver buffer */
		wqe->sg_list[4].addr = sge.addr;
		wqe->sg_list[4].lkey = sge.lkey;
		wqe->sg_list[4].size = sge.size;
		size -= wqe->sg_list[4].size;
		if (size) {
			dev_err(rdev_to_dev(qp->rdev),
				"QP1 rq buffer is incorrect!\n");
			rc = -ENOMEM;
			goto done;
		}
		sge.size = (u32)size;
		wqe->num_sge = 5;
	} else {
		dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n");
		rc = -ENOMEM;
	}
done:
	return rc;
}
3153acd884deSSumit Saxena 
bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp * qp,const struct ib_recv_wr * wr,struct bnxt_qplib_swqe * wqe)3154acd884deSSumit Saxena static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
3155acd884deSSumit Saxena 					    const struct ib_recv_wr *wr,
3156acd884deSSumit Saxena 					    struct bnxt_qplib_swqe *wqe)
3157acd884deSSumit Saxena {
3158acd884deSSumit Saxena 	struct bnxt_re_sqp_entries *sqp_entry;
3159acd884deSSumit Saxena 	struct bnxt_qplib_sge sge;
3160acd884deSSumit Saxena 	struct bnxt_re_dev *rdev;
3161acd884deSSumit Saxena 	u32 rq_prod_index;
3162acd884deSSumit Saxena 	int rc = 0;
3163acd884deSSumit Saxena 
3164acd884deSSumit Saxena 	rdev = qp->rdev;
3165acd884deSSumit Saxena 
3166acd884deSSumit Saxena 	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
3167acd884deSSumit Saxena 
3168acd884deSSumit Saxena 	if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) {
3169acd884deSSumit Saxena 		/* Create 1 SGE to receive the entire
3170acd884deSSumit Saxena 		 * ethernet packet
3171acd884deSSumit Saxena 		 */
3172acd884deSSumit Saxena 		/* SGE 1 */
3173acd884deSSumit Saxena 		wqe->sg_list[0].addr = sge.addr;
3174acd884deSSumit Saxena 		/* TODO check the lkey to be used */
3175acd884deSSumit Saxena 		wqe->sg_list[0].lkey = sge.lkey;
3176acd884deSSumit Saxena 		wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
3177acd884deSSumit Saxena 		if (sge.size < wqe->sg_list[0].size) {
3178acd884deSSumit Saxena 			dev_err(rdev_to_dev(qp->rdev),
3179acd884deSSumit Saxena 				"QP1 rq buffer is empty!\n");
3180acd884deSSumit Saxena 			rc = -ENOMEM;
3181acd884deSSumit Saxena 			goto done;
3182acd884deSSumit Saxena 		}
3183acd884deSSumit Saxena 
3184acd884deSSumit Saxena 		sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
3185acd884deSSumit Saxena 		sqp_entry->sge.addr = wr->sg_list[0].addr;
3186acd884deSSumit Saxena 		sqp_entry->sge.lkey = wr->sg_list[0].lkey;
3187acd884deSSumit Saxena 		sqp_entry->sge.size = wr->sg_list[0].length;
3188acd884deSSumit Saxena 		/* Store the wrid for reporting completion */
3189acd884deSSumit Saxena 		sqp_entry->wrid = wqe->wr_id;
3190acd884deSSumit Saxena 		/* change the wqe->wrid to table index */
3191acd884deSSumit Saxena 		wqe->wr_id = rq_prod_index;
3192acd884deSSumit Saxena 	}
3193acd884deSSumit Saxena done:
3194acd884deSSumit Saxena 	return rc;
3195acd884deSSumit Saxena }
3196acd884deSSumit Saxena 
is_ud_qp(struct bnxt_re_qp * qp)3197acd884deSSumit Saxena static bool is_ud_qp(struct bnxt_re_qp *qp)
3198acd884deSSumit Saxena {
3199acd884deSSumit Saxena 	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
3200acd884deSSumit Saxena 		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
3201acd884deSSumit Saxena }
3202acd884deSSumit Saxena 
bnxt_re_build_send_wqe(struct bnxt_re_qp * qp,const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3203acd884deSSumit Saxena static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
3204acd884deSSumit Saxena 				  const struct ib_send_wr *wr,
3205acd884deSSumit Saxena 				  struct bnxt_qplib_swqe *wqe)
3206acd884deSSumit Saxena {
3207acd884deSSumit Saxena 	struct bnxt_re_ah *ah = NULL;
3208acd884deSSumit Saxena 
3209acd884deSSumit Saxena 	if(is_ud_qp(qp)) {
3210acd884deSSumit Saxena 		ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah, ibah);
3211acd884deSSumit Saxena 		wqe->send.q_key = ud_wr(wr)->remote_qkey;
3212acd884deSSumit Saxena 		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
3213acd884deSSumit Saxena 		wqe->send.avid = ah->qplib_ah.id;
3214acd884deSSumit Saxena 	}
3215acd884deSSumit Saxena 	switch (wr->opcode) {
3216acd884deSSumit Saxena 	case IB_WR_SEND:
3217acd884deSSumit Saxena 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
3218acd884deSSumit Saxena 		break;
3219acd884deSSumit Saxena 	case IB_WR_SEND_WITH_IMM:
3220acd884deSSumit Saxena 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
3221acd884deSSumit Saxena 		wqe->send.imm_data = wr->ex.imm_data;
3222acd884deSSumit Saxena 		break;
3223acd884deSSumit Saxena 	case IB_WR_SEND_WITH_INV:
3224acd884deSSumit Saxena 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
3225acd884deSSumit Saxena 		wqe->send.inv_key = wr->ex.invalidate_rkey;
3226acd884deSSumit Saxena 		break;
3227acd884deSSumit Saxena 	default:
3228acd884deSSumit Saxena 		dev_err(rdev_to_dev(qp->rdev), "%s Invalid opcode %d!\n",
3229acd884deSSumit Saxena 			__func__, wr->opcode);
3230acd884deSSumit Saxena 		return -EINVAL;
3231acd884deSSumit Saxena 	}
3232acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_SIGNALED)
3233acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3234acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_FENCE)
3235acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3236acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_SOLICITED)
3237acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3238acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_INLINE)
3239acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
3240acd884deSSumit Saxena 
3241acd884deSSumit Saxena 	return 0;
3242acd884deSSumit Saxena }
3243acd884deSSumit Saxena 
bnxt_re_build_rdma_wqe(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3244acd884deSSumit Saxena static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
3245acd884deSSumit Saxena 				  struct bnxt_qplib_swqe *wqe)
3246acd884deSSumit Saxena {
3247acd884deSSumit Saxena 	switch (wr->opcode) {
3248acd884deSSumit Saxena 	case IB_WR_RDMA_WRITE:
3249acd884deSSumit Saxena 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
3250acd884deSSumit Saxena 		break;
3251acd884deSSumit Saxena 	case IB_WR_RDMA_WRITE_WITH_IMM:
3252acd884deSSumit Saxena 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
3253acd884deSSumit Saxena 		wqe->rdma.imm_data = wr->ex.imm_data;
3254acd884deSSumit Saxena 		break;
3255acd884deSSumit Saxena 	case IB_WR_RDMA_READ:
3256acd884deSSumit Saxena 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
3257acd884deSSumit Saxena 		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
3258acd884deSSumit Saxena 		break;
3259acd884deSSumit Saxena 	default:
3260acd884deSSumit Saxena 		return -EINVAL;
3261acd884deSSumit Saxena 	}
3262acd884deSSumit Saxena 	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
3263acd884deSSumit Saxena 	wqe->rdma.r_key = rdma_wr(wr)->rkey;
3264acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_SIGNALED)
3265acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3266acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_FENCE)
3267acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3268acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_SOLICITED)
3269acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3270acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_INLINE)
3271acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
3272acd884deSSumit Saxena 
3273acd884deSSumit Saxena 	return 0;
3274acd884deSSumit Saxena }
3275acd884deSSumit Saxena 
bnxt_re_build_atomic_wqe(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3276acd884deSSumit Saxena static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
3277acd884deSSumit Saxena 				    struct bnxt_qplib_swqe *wqe)
3278acd884deSSumit Saxena {
3279acd884deSSumit Saxena 	switch (wr->opcode) {
3280acd884deSSumit Saxena 	case IB_WR_ATOMIC_CMP_AND_SWP:
3281acd884deSSumit Saxena 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
3282acd884deSSumit Saxena 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
3283acd884deSSumit Saxena 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
3284acd884deSSumit Saxena 		break;
3285acd884deSSumit Saxena 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3286acd884deSSumit Saxena 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
3287acd884deSSumit Saxena 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
3288acd884deSSumit Saxena 		break;
3289acd884deSSumit Saxena 	default:
3290acd884deSSumit Saxena 		return -EINVAL;
3291acd884deSSumit Saxena 	}
3292acd884deSSumit Saxena 	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
3293acd884deSSumit Saxena 	wqe->atomic.r_key = atomic_wr(wr)->rkey;
3294acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_SIGNALED)
3295acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3296acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_FENCE)
3297acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3298acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_SOLICITED)
3299acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3300acd884deSSumit Saxena 	return 0;
3301acd884deSSumit Saxena }
3302acd884deSSumit Saxena 
bnxt_re_build_inv_wqe(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)3303acd884deSSumit Saxena static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
3304acd884deSSumit Saxena 				 struct bnxt_qplib_swqe *wqe)
3305acd884deSSumit Saxena {
3306acd884deSSumit Saxena 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
3307acd884deSSumit Saxena 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
3308acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_SIGNALED)
3309acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3310acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_FENCE)
3311acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3312acd884deSSumit Saxena 	if (wr->send_flags & IB_SEND_SOLICITED)
3313acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3314acd884deSSumit Saxena 
3315acd884deSSumit Saxena 	return 0;
3316acd884deSSumit Saxena }
3317acd884deSSumit Saxena 
/* Translate an IB fast-register (REG_MR) work request into a qplib WQE.
 * Points the WQE at the MR's pre-allocated fast-register page list
 * (qplib_frpl), maps the access flags, and sanity-checks that the MR
 * length fits within the registered pages.  Returns 0 on success or
 * -EINVAL if the page count or length is inconsistent.
 */
static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_mr *mr = to_bnxt_re(wr->mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
	int reg_len, i, access = wr->access;

	/* The MR cannot reference more pages than the FRPL was sized for */
	if (mr->npages > qplib_frpl->max_pg_ptrs) {
		dev_err_ratelimited(rdev_to_dev(mr->rdev),
			" %s: failed npages %d > %d\n", __func__,
			mr->npages, qplib_frpl->max_pg_ptrs);
		return -EINVAL;
	}

	/* Hand the FRPL's HW queue (PBL) and the page array to the WQE */
	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
	wqe->frmr.levels = qplib_frpl->hwq.level;
	wqe->frmr.page_list = mr->pages;
	wqe->frmr.page_list_len = mr->npages;
	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;

	if (wr->wr.send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	/* Map the IB access flags onto the HW access-control bits */
	if (access & IB_ACCESS_LOCAL_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
	if (access & IB_ACCESS_REMOTE_READ)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
	if (access & IB_ACCESS_REMOTE_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
	if (access & IB_ACCESS_REMOTE_ATOMIC)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
	if (access & IB_ACCESS_MW_BIND)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;

	/* TODO: OFED provides the rkey of the MR instead of the lkey */
	wqe->frmr.l_key = wr->key;
	wqe->frmr.length = wr->mr->length;
	/* Page-size fields are encoded as log2 relative to the 4K base */
	wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
	wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
	wqe->frmr.va = wr->mr->iova;
	reg_len = wqe->frmr.page_list_len * wr->mr->page_size;

	/* Reject a registration whose length exceeds the page coverage */
	if (wqe->frmr.length > reg_len) {
		dev_err_ratelimited(rdev_to_dev(mr->rdev),
				    "%s: bnxt_re_mr 0x%px  len (%d > %d)\n",
				    __func__, (void *)mr, wqe->frmr.length,
				    reg_len);

		for (i = 0; i < mr->npages; i++)
			dev_dbg(rdev_to_dev(mr->rdev),
				"%s: build_reg_wqe page[%d] = 0x%llx\n",
				__func__, i, mr->pages[i]);

		return -EINVAL;
	}

	return 0;
}
3376acd884deSSumit Saxena 
/* Point the qplib WQE at the ULP's scatter/gather list.
 *
 * NOTE(review): this aliases ib_sge as bnxt_qplib_sge via a cast rather
 * than copying field by field, so it relies on the two structs being
 * layout-compatible (addr/lkey/length vs addr/lkey/size) — confirm
 * against the struct definitions if either ever changes.
 */
static void bnxt_re_set_sg_list(const struct ib_send_wr *wr,
				struct bnxt_qplib_swqe *wqe)
{
	wqe->sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
	wqe->num_sge = wr->num_sge;
}
3383acd884deSSumit Saxena 
bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp * qp)3384acd884deSSumit Saxena static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
3385acd884deSSumit Saxena {
3386acd884deSSumit Saxena 	if ((qp->ib_qp.qp_type == IB_QPT_UD || qp->ib_qp.qp_type == IB_QPT_GSI ||
3387acd884deSSumit Saxena 	    qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
3388acd884deSSumit Saxena 	    qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
3389acd884deSSumit Saxena 		int qp_attr_mask;
3390acd884deSSumit Saxena 		struct ib_qp_attr qp_attr;
3391acd884deSSumit Saxena 
3392acd884deSSumit Saxena 		qp_attr_mask = IB_QP_STATE;
3393acd884deSSumit Saxena 		qp_attr.qp_state = IB_QPS_RTS;
3394acd884deSSumit Saxena 		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
3395acd884deSSumit Saxena 		qp->qplib_qp.wqe_cnt = 0;
3396acd884deSSumit Saxena 	}
3397acd884deSSumit Saxena }
3398acd884deSSumit Saxena 
/*
 * Post a chain of WRs on the shadow QP as plain SENDs.
 * Presumably used only by the GSI shadow-QP path — confirm with callers.
 * Stops at the first WR that fails and returns its errno; the SQ
 * doorbell is rung once for everything queued before the failure.
 */
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       const struct ib_send_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));
		/* Common */
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Send SGEs\n");
			rc = -EINVAL;
			break;
		}

		bnxt_re_set_sg_list(wr, &wqe);
		wqe.wr_id = wr->wr_id;
		/* Every WR is forced to a SEND regardless of wr->opcode. */
		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
		if (rc)
			break;

		rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"bad_wr seen with opcode = 0x%x rc = %d\n",
				wr->opcode, rc);
			break;
		}
		wr = wr->next;
	}
	/* One doorbell for the whole batch queued above. */
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}
3440acd884deSSumit Saxena 
bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe * wqe)3441acd884deSSumit Saxena static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
3442acd884deSSumit Saxena {
3443acd884deSSumit Saxena 	/* Need unconditional fence for non-wire memory opcode
3444acd884deSSumit Saxena 	 * to work as expected.
3445acd884deSSumit Saxena 	 */
3446acd884deSSumit Saxena 	if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
3447acd884deSSumit Saxena 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
3448acd884deSSumit Saxena 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
3449acd884deSSumit Saxena 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
3450acd884deSSumit Saxena 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3451acd884deSSumit Saxena }
3452acd884deSSumit Saxena 
/*
 * ib_post_send verb: translate each ib_send_wr into a qplib SWQE and
 * queue it on the SQ under sq_lock.  On the first failure *bad_wr is
 * set to the offending WR and posting stops; WQEs already queued are
 * still doorbelled.  Returns 0 or a negative errno.
 */
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{
	struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_sge sge[6];
	struct bnxt_qplib_swqe wqe;
	struct bnxt_re_dev *rdev;
	unsigned long flags;
	int rc = 0;

	rdev = qp->rdev;
	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));
		/* Common */
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Send SGEs\n");
			rc = -EINVAL;
			goto bad;
		}

		bnxt_re_set_sg_list(wr, &wqe);
		wqe.wr_id = wr->wr_id;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* Non-UD GSI traffic needs a locally built header,
			 * so substitute the on-stack SGE scratch array.
			 */
			if (ib_qp->qp_type == IB_QPT_GSI &&
			    rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
				memset(sge, 0, sizeof(sge));
				wqe.sg_list = sge;
				rc = bnxt_re_build_gsi_send(qp, wr, &wqe);
				if (rc)
					goto bad;
			} else if (ib_qp->qp_type == IB_QPT_RAW_ETHERTYPE) {
				bnxt_re_build_raw_send(wr, &wqe);
			}
			/* NOTE(review): send_flags is a bitmask, but this
			 * switch only matches when IB_SEND_IP_CSUM is the
			 * sole flag set — verify that is intentional.
			 */
			switch (wr->send_flags) {
			case IB_SEND_IP_CSUM:
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
				break;
			default:
				break;
			}
			fallthrough;
		case IB_WR_SEND_WITH_INV:
			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
		case IB_WR_RDMA_READ:
			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			dev_err(rdev_to_dev(rdev),
				"RDMA Read with Invalidate is not supported\n");
			rc = -EINVAL;
			goto bad;
		case IB_WR_LOCAL_INV:
			rc = bnxt_re_build_inv_wqe(wr, &wqe);
			break;
		case IB_WR_REG_MR:
			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
			break;
		default:
			/* Unsupported WRs */
			dev_err(rdev_to_dev(rdev),
				"WR (0x%x) is not supported\n", wr->opcode);
			rc = -EINVAL;
			goto bad;
		}

		if (likely(!rc)) {
			/* Older silicon needs an unconditional fence on
			 * memory-management opcodes.
			 */
			if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
				bnxt_re_legacy_set_uc_fence(&wqe);
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
		}
bad:
		if (unlikely(rc)) {
			dev_err(rdev_to_dev(rdev),
				"bad_wr seen with opcode = 0x%x\n", wr->opcode);
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	/* Single doorbell for everything queued above. */
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
		bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	return rc;
}
3553acd884deSSumit Saxena 
bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp,struct ib_recv_wr * wr)3554acd884deSSumit Saxena static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
3555acd884deSSumit Saxena 				struct bnxt_re_qp *qp,
3556acd884deSSumit Saxena 				struct ib_recv_wr *wr)
3557acd884deSSumit Saxena {
3558acd884deSSumit Saxena 	struct bnxt_qplib_swqe wqe;
3559acd884deSSumit Saxena 	int rc = 0;
3560acd884deSSumit Saxena 
3561acd884deSSumit Saxena 	/* rq lock can be pardoned here. */
3562acd884deSSumit Saxena 	while (wr) {
3563acd884deSSumit Saxena 		/* House keeping */
3564acd884deSSumit Saxena 		memset(&wqe, 0, sizeof(wqe));
3565acd884deSSumit Saxena 		/* Common */
3566acd884deSSumit Saxena 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
3567acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev),
3568acd884deSSumit Saxena 				"Limit exceeded for Receive SGEs\n");
3569acd884deSSumit Saxena 			rc = -EINVAL;
3570acd884deSSumit Saxena 			goto bad;
3571acd884deSSumit Saxena 		}
3572acd884deSSumit Saxena 
3573acd884deSSumit Saxena 		wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
3574acd884deSSumit Saxena 		wqe.num_sge = wr->num_sge;
3575acd884deSSumit Saxena 		wqe.wr_id = wr->wr_id;
3576acd884deSSumit Saxena 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
3577acd884deSSumit Saxena 		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
3578acd884deSSumit Saxena bad:
3579acd884deSSumit Saxena 		if (rc) {
3580acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev),
3581acd884deSSumit Saxena 				"bad_wr seen with RQ post\n");
3582acd884deSSumit Saxena 			break;
3583acd884deSSumit Saxena 		}
3584acd884deSSumit Saxena 		wr = wr->next;
3585acd884deSSumit Saxena 	}
3586acd884deSSumit Saxena 	bnxt_qplib_post_recv_db(&qp->qplib_qp);
3587acd884deSSumit Saxena 	return rc;
3588acd884deSSumit Saxena }
3589acd884deSSumit Saxena 
bnxt_re_build_gsi_recv(struct bnxt_re_qp * qp,const struct ib_recv_wr * wr,struct bnxt_qplib_swqe * wqe)3590acd884deSSumit Saxena static int bnxt_re_build_gsi_recv(struct bnxt_re_qp *qp,
3591acd884deSSumit Saxena 				  const struct ib_recv_wr *wr,
3592acd884deSSumit Saxena 				  struct bnxt_qplib_swqe *wqe)
3593acd884deSSumit Saxena {
3594acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = qp->rdev;
3595acd884deSSumit Saxena 	int rc = 0;
3596acd884deSSumit Saxena 
3597acd884deSSumit Saxena 	if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL)
3598acd884deSSumit Saxena 		rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, wqe);
3599acd884deSSumit Saxena 	else
3600acd884deSSumit Saxena 		rc = bnxt_re_build_qp1_recv(qp, wr, wqe);
3601acd884deSSumit Saxena 
3602acd884deSSumit Saxena 	return rc;
3603acd884deSSumit Saxena }
3604acd884deSSumit Saxena 
/*
 * ib_post_recv verb: translate each ib_recv_wr into a qplib RECV SWQE
 * and queue it on the RQ under rq_lock.  The doorbell is rung every
 * BNXT_RE_RQ_WQE_THRESHOLD entries and once more for any remainder.
 * On the first failure *bad_wr is set and posting stops.
 */
int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_sge sge[6];
	struct bnxt_qplib_swqe wqe;
	unsigned long flags;
	u32 count = 0;
	int rc = 0;

	spin_lock_irqsave(&qp->rq_lock, flags);
	while (wr) {
		memset(&wqe, 0, sizeof(wqe));
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Receive SGEs\n");
			rc = -EINVAL;
			goto bad;
		}
		wqe.num_sge = wr->num_sge;
		wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		/* Non-UD GSI receives need locally built buffers, so the
		 * WR's sg_list is replaced with the on-stack scratch array.
		 */
		if (ib_qp->qp_type == IB_QPT_GSI &&
		    qp->rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
			memset(sge, 0, sizeof(sge));
			wqe.sg_list = sge;
			rc = bnxt_re_build_gsi_recv(qp, wr, &wqe);
			if (rc)
				goto bad;
		}
		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			dev_err(rdev_to_dev(qp->rdev),
				"bad_wr seen with RQ post\n");
			*bad_wr = wr;
			break;
		}
		/* Ring DB if the RQEs posted reaches a threshold value */
		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
			bnxt_qplib_post_recv_db(&qp->qplib_qp);
			count = 0;
		}
		wr = wr->next;
	}

	if (count)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);
	spin_unlock_irqrestore(&qp->rq_lock, flags);

	return rc;
}
3659acd884deSSumit Saxena 
3660acd884deSSumit Saxena /* Completion Queues */
/*
 * Tear down a CQ.  Software-only CQs created for doorbell pacing /
 * recovery have no HW CQ and are handled by just releasing their
 * pages.  For real CQs the qplib CQ is marked destroyed (under
 * compl_lock, so the cqn handler sees the flag), any poll work is
 * cancelled, and the HW CQ plus all backing resources are released.
 * Failures from the firmware destroy are only logged — this verb
 * returns void.
 */
void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
	struct bnxt_re_dev *rdev = cq->rdev;
	int rc =  0;

	if (cq->uctx_cq_page) {
		BNXT_RE_CQ_PAGE_LIST_DEL(cq->uctx, cq);
		free_page((u64)cq->uctx_cq_page);
		cq->uctx_cq_page = NULL;
	}
	if (cq->is_dbr_soft_cq && cq->uctx) {
		void *dbr_page;

		if (cq->uctx->dbr_recov_cq) {
			dbr_page = cq->uctx->dbr_recov_cq_page;
			cq->uctx->dbr_recov_cq_page = NULL;
			cq->uctx->dbr_recov_cq = NULL;
			free_page((unsigned long)dbr_page);
		}
		/* Soft CQs own no HW resources; skip the HW teardown. */
		goto end;
	}
	/* CQ getting destroyed. Set this state for cqn handler */
	spin_lock_bh(&cq->qplib_cq.compl_lock);
	cq->qplib_cq.destroyed = true;
	spin_unlock_bh(&cq->qplib_cq.compl_lock);
	/* Make sure no poll work runs against a dying CQ. */
	if (ib_cq->poll_ctx == IB_POLL_WORKQUEUE ||
	    ib_cq->poll_ctx == IB_POLL_UNBOUND_WORKQUEUE)
		cancel_work_sync(&ib_cq->work);

	rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc)
		dev_err_ratelimited(rdev_to_dev(rdev),
				   "%s id = %d failed rc = %d\n",
				   __func__, cq->qplib_cq.id, rc);

	/* Return the NQ borrowed at create time. */
	bnxt_re_put_nq(rdev, cq->qplib_cq.nq);
	if (cq->umem && !IS_ERR(cq->umem))
		ib_umem_release(cq->umem);

	kfree(cq->cql);
	atomic_dec(&rdev->stats.rsors.cq_count);
end:
	return;
}
3706acd884deSSumit Saxena 
3707acd884deSSumit Saxena static inline struct
__get_cq_from_cq_in(struct ib_cq * cq_in,struct bnxt_re_dev * rdev)3708acd884deSSumit Saxena bnxt_re_cq *__get_cq_from_cq_in(struct ib_cq *cq_in,
3709acd884deSSumit Saxena 				struct bnxt_re_dev *rdev)
3710acd884deSSumit Saxena {
3711acd884deSSumit Saxena 	struct bnxt_re_cq *cq;
3712acd884deSSumit Saxena 	cq = container_of(cq_in, struct bnxt_re_cq, ibcq);
3713acd884deSSumit Saxena 	return cq;
3714acd884deSSumit Saxena }
3715acd884deSSumit Saxena 
/*
 * ib_create_cq verb.  Validates the requested depth against device
 * limits, then either (a) short-circuits for user "soft" CQs used for
 * doorbell pacing/recovery (no HW CQ is created), (b) maps the user
 * CQ buffer and DPI for a user CQ, or (c) allocates a kernel poll
 * buffer (cql) for a kernel CQ — before creating the HW CQ on an NQ
 * picked round-robin.  For user CQs the id/doorbell info (and on P7
 * a shared CQ page) is copied back through udata.
 * Returns 0 or a negative errno.
 *
 * NOTE(review): on the failure paths after bnxt_re_get_nq() the NQ
 * does not appear to be returned via bnxt_re_put_nq() — verify
 * whether that leaks an NQ reference.
 */
int bnxt_re_create_cq(struct ib_cq *cq_in,
		      const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_re_ucontext *uctx = NULL;
	struct ib_ucontext *context = NULL;
	struct bnxt_qplib_cq *qplcq;
	struct bnxt_re_cq_req ureq;
	struct bnxt_re_dev *rdev;
	int rc, entries;
	struct bnxt_re_cq *cq;
	u32 max_active_cqs;
	int cqe = attr->cqe;

	/* No CQ creation flags are supported. */
	if (attr->flags)
		return -EOPNOTSUPP;

	rdev = rdev_from_cq_in(cq_in);
	if (rdev->mod_exit) {
		rc = -EIO;
		dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
		goto exit;
	}
	if (udata) {
		uctx = rdma_udata_to_drv_context(udata,
						 struct bnxt_re_ucontext,
						 ibucontext);
		context = &uctx->ibucontext;
	}
	dev_attr = rdev->dev_attr;

	if (atomic_read(&rdev->stats.rsors.cq_count) >= dev_attr->max_cq) {
		dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQs)\n");
		rc = -EINVAL;
		goto exit;
	}
	/* Validate CQ fields */
	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
		dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQ_WQs)\n");
		rc = -EINVAL;
		goto exit;
	}

	cq = __get_cq_from_cq_in(cq_in, rdev);
	if (!cq) {
		rc = -ENOMEM;
		goto exit;
	}
	cq->rdev = rdev;
	cq->uctx = uctx;
	qplcq = &cq->qplib_cq;
	qplcq->cq_handle = (u64)qplcq;
	/*
	 * Since CQ is for QP1 is shared with Shadow CQ, the size
	 * should be double the size. There is no way to identify
	 * whether this CQ is for GSI QP. So assuming that the first
	 * CQ created is for QP1
	 */
	if (!udata && !rdev->gsi_ctx.first_cq_created &&
	    rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL) {
		rdev->gsi_ctx.first_cq_created = true;
		/*
		 * Total CQE required for the CQ = CQE for QP1 RQ +
		 * CQE for Shadow QP SQEs + CQE for Shadow QP RQEs.
		 * Max entries of shadow QP SQ and RQ = QP1 RQEs = cqe
		 */
		cqe *= 3;
	}

	/* Round the depth up per HW rules, then clamp to device max. */
	entries = bnxt_re_init_depth(cqe + 1, uctx);
	if (entries > dev_attr->max_cq_wqes + 1)
		entries = dev_attr->max_cq_wqes + 1;

	qplcq->sginfo.pgshft = PAGE_SHIFT;
	qplcq->sginfo.pgsize = PAGE_SIZE;
	if (udata) {
		/* Tolerate an older userspace with a smaller request
		 * struct; copy only what it provided.
		 */
		if (udata->inlen < sizeof(ureq))
			dev_warn(rdev_to_dev(rdev),
				 "Update the library ulen %d klen %d\n",
				 (unsigned int)udata->inlen,
				 (unsigned int)sizeof(ureq));

		rc = ib_copy_from_udata(&ureq, udata,
					min(udata->inlen, sizeof(ureq)));
		if (rc)
			goto fail;

		/* Doorbell-pacing notification CQ: software only. */
		if (BNXT_RE_IS_DBR_PACING_NOTIFY_CQ(ureq)) {
			cq->is_dbr_soft_cq = true;
			goto success;
		}

		/* Doorbell-recovery CQ: software only, plus one shared
		 * page carrying the epoch/epoch_ack words.
		 */
		if (BNXT_RE_IS_DBR_RECOV_CQ(ureq)) {
			void *dbr_page;
			u32 *epoch;

			dbr_page = (void *)__get_free_page(GFP_KERNEL);
			if (!dbr_page) {
				dev_err(rdev_to_dev(rdev),
					"DBR recov CQ page allocation failed!");
				rc = -ENOMEM;
				goto fail;
			}

			/* memset the epoch and epoch_ack to 0 */
			epoch = dbr_page;
			epoch[0] = 0x0;
			epoch[1] = 0x0;

			uctx->dbr_recov_cq = cq;
			uctx->dbr_recov_cq_page = dbr_page;

			cq->is_dbr_soft_cq = true;
			goto success;
		}

		/* Normal user CQ: pin the user-space ring buffer. */
		cq->umem = ib_umem_get_compat
				      (rdev, context, udata, ureq.cq_va,
				       entries * sizeof(struct cq_base),
				       IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
			dev_err(rdev_to_dev(rdev),
				"%s: ib_umem_get failed! rc = %d\n",
				__func__, rc);
			goto fail;
		}
		qplcq->sginfo.sghead = get_ib_umem_sgl(cq->umem,
						       &qplcq->sginfo.nmap);
		qplcq->sginfo.npages = ib_umem_num_pages_compat(cq->umem);
		/* Lazily allocate the per-context user DPI. */
		if (!uctx->dpi.dbr) {
			rc = bnxt_re_get_user_dpi(rdev, uctx);
			if (rc)
				goto c2fail;
		}
		qplcq->dpi = &uctx->dpi;
	} else {
		/* Kernel CQ: bounded poll buffer for completions. */
		cq->max_cql = entries > MAX_CQL_PER_POLL ? MAX_CQL_PER_POLL : entries;
		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
				  GFP_KERNEL);
		if (!cq->cql) {
			dev_err(rdev_to_dev(rdev),
				"Allocate CQL for %d failed!\n", cq->max_cql);
			rc = -ENOMEM;
			goto fail;
		}
		qplcq->dpi = &rdev->dpi_privileged;
	}
	/*
	 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
	 * used for getting the NQ index.
	 */
	qplcq->max_wqe = entries;
	qplcq->nq = bnxt_re_get_nq(rdev);
	qplcq->cnq_hw_ring_id = qplcq->nq->ring_id;

	rc = bnxt_qplib_create_cq(&rdev->qplib_res, qplcq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Create HW CQ failed!\n");
		goto fail;
	}

	INIT_LIST_HEAD(&cq->cq_list);
	cq->ibcq.cqe = entries;
	cq->cq_period = qplcq->period;

	/* Track active and high-water CQ counts. */
	atomic_inc(&rdev->stats.rsors.cq_count);
	max_active_cqs = atomic_read(&rdev->stats.rsors.cq_count);
	if (max_active_cqs > atomic_read(&rdev->stats.rsors.max_cq_count))
		atomic_set(&rdev->stats.rsors.max_cq_count, max_active_cqs);
	spin_lock_init(&cq->cq_lock);

	if (udata) {
		struct bnxt_re_cq_resp resp;

		resp.cqid = qplcq->id;
		resp.tail = qplcq->hwq.cons;
		resp.phase = qplcq->period;
		resp.comp_mask = 0;
		resp.dbr = (u64)uctx->dpi.umdbr;
		resp.dpi = uctx->dpi.dpi;
		resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_DB_INFO;
		/* Copy only on a valid wcpdi */
		if (uctx->wcdpi.dpi) {
			resp.wcdpi = uctx->wcdpi.dpi;
			resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_WC_DPI;
		}

		/* P7 parts share an extra CQ page with userspace. */
		if (_is_chip_p7(rdev->chip_ctx)) {
			cq->uctx_cq_page = (void *)__get_free_page(GFP_KERNEL);

			if (!cq->uctx_cq_page) {
				dev_err(rdev_to_dev(rdev),
					"CQ page allocation failed!\n");
				bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq);
				rc = -ENOMEM;
				goto c2fail;
			}

			resp.uctx_cq_page = (u64)cq->uctx_cq_page;
			resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_CQ_PAGE;
		}

		rc = bnxt_re_copy_to_udata(rdev, &resp,
					   min(udata->outlen, sizeof(resp)),
					   udata);
		if (rc) {
			/* Undo the HW CQ and page before unwinding. */
			free_page((u64)cq->uctx_cq_page);
			cq->uctx_cq_page = NULL;
			bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq);
			goto c2fail;
		}

		if (cq->uctx_cq_page)
			BNXT_RE_CQ_PAGE_LIST_ADD(uctx, cq);
	}

success:
	return 0;
c2fail:
	if (udata && cq->umem && !IS_ERR(cq->umem))
		ib_umem_release(cq->umem);
fail:
	if (cq) {
		if (cq->cql)
			kfree(cq->cql);
	}
exit:
	return rc;
}
3947acd884deSSumit Saxena 
/*
 * ib_modify_cq verb: update the CQ moderation parameters (completion
 * count / period).  The cached values are refreshed only after the
 * firmware accepts the change.  Returns 0 or the qplib errno.
 */
int bnxt_re_modify_cq(struct ib_cq *ib_cq, u16 cq_count, u16 cq_period)
{
	struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
	struct bnxt_re_dev *rdev = cq->rdev;
	int rc;

	/* Nothing to do when both parameters already match. */
	if (cq->cq_count == cq_count && cq->cq_period == cq_period)
		return 0;

	cq->qplib_cq.count = cq_count;
	cq->qplib_cq.period = cq_period;
	rc = bnxt_qplib_modify_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Modify HW CQ %#x failed!\n",
			cq->qplib_cq.id);
		return rc;
	}
	/* On success, update the shadow */
	cq->cq_count = cq_count;
	cq->cq_period = cq_period;
	return 0;
}
3969acd884deSSumit Saxena 
bnxt_re_resize_cq_complete(struct bnxt_re_cq * cq)3970acd884deSSumit Saxena static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
3971acd884deSSumit Saxena {
3972acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = cq->rdev;
3973acd884deSSumit Saxena 
3974acd884deSSumit Saxena 	bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3975acd884deSSumit Saxena 
3976acd884deSSumit Saxena 	cq->qplib_cq.max_wqe = cq->resize_cqe;
3977acd884deSSumit Saxena 	if (cq->resize_umem) {
3978acd884deSSumit Saxena 		ib_umem_release(cq->umem);
3979acd884deSSumit Saxena 		cq->umem = cq->resize_umem;
3980acd884deSSumit Saxena 		cq->resize_umem = NULL;
3981acd884deSSumit Saxena 		cq->resize_cqe = 0;
3982acd884deSSumit Saxena 	}
3983acd884deSSumit Saxena }
3984acd884deSSumit Saxena 
/*
 * bnxt_re_resize_cq - ib_device resize_cq verb for bnxt_re.
 * @ib_cq: CQ to resize
 * @cqe:   requested minimum number of CQEs
 * @udata: user-provided parameters (new ring VA) for user-mode CQs
 *
 * Stages a new, larger/smaller CQ ring and asks the firmware to resize.
 * For user CQs the new ring memory is pinned from the address supplied in
 * the resize request; the switch-over itself (bnxt_re_resize_cq_complete())
 * is deferred to the poll path so in-flight CQEs in the old ring are not
 * lost.  Kernel-consumer resize is not implemented (see TODO below).
 *
 * Returns 0 on success; -EBUSY if a resize is already pending, -EINVAL on
 * an out-of-range depth, or a negative errno from pinning/firmware.  On
 * failure the staged umem and the original sginfo/DPI are restored.
 */
int bnxt_re_resize_cq(struct ib_cq *ib_cq, int cqe, struct ib_udata *udata)
{
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_dpi *orig_dpi = NULL;
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_re_ucontext *uctx = NULL;
	struct bnxt_re_resize_cq_req ureq;
	struct ib_ucontext *context = NULL;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_cq *cq;
	int rc, entries;

	/* Don't allow more than one resize request at the same time.
	 * TODO: need a mutex here when we support kernel consumers of resize.
	 */
	cq =  to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
	rdev = cq->rdev;
	dev_attr = rdev->dev_attr;
	if (ib_cq->uobject) {
		uctx = rdma_udata_to_drv_context(udata,
						 struct bnxt_re_ucontext,
						 ibucontext);
		context = &uctx->ibucontext;
	}

	/* A staged resize_umem means a prior resize has not completed yet. */
	if (cq->resize_umem) {
		dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - Busy\n",
			cq->qplib_cq.id);
		return -EBUSY;
	}

	/* Check the requested cq depth out of supported depth */
	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
		dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - max exceeded\n",
			cq->qplib_cq.id);
		return -EINVAL;
	}

	/* Round the depth up per HW requirements, then clamp to the limit. */
	entries = bnxt_re_init_depth(cqe + 1, uctx);
	entries = min_t(u32, (u32)entries, dev_attr->max_cq_wqes + 1);

	/* Check to see if the new requested size can be handled by already
	 * existing CQ
	 */
	if (entries == cq->ibcq.cqe) {
		dev_info(rdev_to_dev(rdev), "CQ is already at size %d\n", cqe);
		return 0;
	}

	if (ib_cq->uobject && udata) {
		/* Older libraries may pass a short request; warn but proceed
		 * with the bytes we did receive.
		 */
		if (udata->inlen < sizeof(ureq))
			dev_warn(rdev_to_dev(rdev),
				 "Update the library ulen %d klen %d\n",
				 (unsigned int)udata->inlen,
				 (unsigned int)sizeof(ureq));

		rc = ib_copy_from_udata(&ureq, udata,
					min(udata->inlen, sizeof(ureq)));
		if (rc)
			goto fail;

		dev_dbg(rdev_to_dev(rdev), "%s: va %p\n", __func__,
			(void *)ureq.cq_va);
		/* Pin the user's new ring; it stays staged in resize_umem
		 * until the resize completes.
		 */
		cq->resize_umem = ib_umem_get_compat
				       (rdev,
					context, udata, ureq.cq_va,
					entries * sizeof(struct cq_base),
					IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(cq->resize_umem)) {
			rc = PTR_ERR(cq->resize_umem);
			cq->resize_umem = NULL;
			dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n",
				__func__, rc);
			goto fail;
		}
		cq->resize_cqe = entries;
		dev_dbg(rdev_to_dev(rdev), "%s: ib_umem_get() success\n",
			__func__);
		/* Save current sginfo/DPI so we can roll back on failure. */
		memcpy(&sginfo, &cq->qplib_cq.sginfo, sizeof(sginfo));
		orig_dpi = cq->qplib_cq.dpi;

		/* Point the QPLIB CQ at the new ring's scatter list. */
		cq->qplib_cq.sginfo.sghead = get_ib_umem_sgl(cq->resize_umem,
						&cq->qplib_cq.sginfo.nmap);
		cq->qplib_cq.sginfo.npages =
				ib_umem_num_pages_compat(cq->resize_umem);
		cq->qplib_cq.sginfo.pgsize = PAGE_SIZE;
		cq->qplib_cq.sginfo.pgshft = PAGE_SHIFT;
		cq->qplib_cq.dpi = &uctx->dpi;
	} else {
		/* TODO: kernel consumer */
	}

	rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Resize HW CQ %#x failed!\n",
			cq->qplib_cq.id);
		goto fail;
	}

	cq->ibcq.cqe = cq->resize_cqe;
	/* For kernel consumers complete resize here. For uverbs consumers,
	 * we complete it in the context of ibv_poll_cq().
	 */
	if (!cq->resize_umem)
		bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);

	atomic_inc(&rdev->stats.rsors.resize_count);
	return 0;

fail:
	/* Undo the staged state so the CQ keeps using its original ring. */
	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
		cq->resize_cqe = 0;
		memcpy(&cq->qplib_cq.sginfo, &sginfo, sizeof(sginfo));
		cq->qplib_cq.dpi = orig_dpi;
	}
	return rc;
}
4104acd884deSSumit Saxena 
/*
 * __req_to_ib_wc_status - map a SQ (requester) CQE hardware status to the
 * generic ib_wc_status reported to the consumer.
 * @qstatus: CQ_REQ_STATUS_* value from the completion entry
 *
 * Unknown statuses map to IB_WC_GENERAL_ERR.
 *
 * Fix: removed the unreachable "return 0;" that followed the switch —
 * every path already returns, and 0 aliases IB_WC_SUCCESS, which would
 * have masked an error had it ever been reached.
 */
static enum ib_wc_status __req_to_ib_wc_status(u8 qstatus)
{
	switch(qstatus) {
	case CQ_REQ_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
		return IB_WC_BAD_RESP_ERR;
	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
		return IB_WC_REM_OP_ERR;
	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
		return IB_WC_RNR_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
		return IB_WC_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}
4137acd884deSSumit Saxena 
/*
 * __rawqp1_to_ib_wc_status - translate a RAWETH_QP1 (GSI receive) CQE
 * hardware status into the generic ib_wc_status.
 * @qstatus: CQ_RES_RAWETH_QP1_STATUS_* value from the completion entry
 *
 * Both flush variants collapse to IB_WC_WR_FLUSH_ERR; anything
 * unrecognized is reported as IB_WC_GENERAL_ERR.
 */
static enum ib_wc_status __rawqp1_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RAWETH_QP1_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
		/* Software and hardware flush look the same to the consumer. */
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
	default:
		return IB_WC_GENERAL_ERR;
	}
}
4161acd884deSSumit Saxena 
/*
 * __rc_to_ib_wc_status - translate an RC responder CQE hardware status
 * into the generic ib_wc_status.
 * @qstatus: CQ_RES_RC_STATUS_* value from the completion entry
 *
 * Both flush variants collapse to IB_WC_WR_FLUSH_ERR; anything
 * unrecognized is reported as IB_WC_GENERAL_ERR.
 */
static enum ib_wc_status __rc_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RC_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
		/* Software and hardware flush look the same to the consumer. */
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
	default:
		return IB_WC_GENERAL_ERR;
	}
}
4187acd884deSSumit Saxena 
bnxt_re_process_req_wc(struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)4188acd884deSSumit Saxena static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
4189acd884deSSumit Saxena {
4190acd884deSSumit Saxena 	switch (cqe->type) {
4191acd884deSSumit Saxena 	case BNXT_QPLIB_SWQE_TYPE_SEND:
4192acd884deSSumit Saxena 		wc->opcode = IB_WC_SEND;
4193acd884deSSumit Saxena 		break;
4194acd884deSSumit Saxena 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
4195acd884deSSumit Saxena 		wc->opcode = IB_WC_SEND;
4196acd884deSSumit Saxena 		wc->wc_flags |= IB_WC_WITH_IMM;
4197acd884deSSumit Saxena 		break;
4198acd884deSSumit Saxena 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
4199acd884deSSumit Saxena 		wc->opcode = IB_WC_SEND;
4200acd884deSSumit Saxena 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4201acd884deSSumit Saxena 		break;
4202acd884deSSumit Saxena 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
4203acd884deSSumit Saxena 		wc->opcode = IB_WC_RDMA_WRITE;
4204acd884deSSumit Saxena 		break;
4205acd884deSSumit Saxena 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
4206acd884deSSumit Saxena 		wc->opcode = IB_WC_RDMA_WRITE;
4207acd884deSSumit Saxena 		wc->wc_flags |= IB_WC_WITH_IMM;
4208acd884deSSumit Saxena 		break;
4209acd884deSSumit Saxena 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
4210acd884deSSumit Saxena 		wc->opcode = IB_WC_RDMA_READ;
4211acd884deSSumit Saxena 		break;
4212acd884deSSumit Saxena 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
4213acd884deSSumit Saxena 		wc->opcode = IB_WC_COMP_SWAP;
4214acd884deSSumit Saxena 		break;
4215acd884deSSumit Saxena 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
4216acd884deSSumit Saxena 		wc->opcode = IB_WC_FETCH_ADD;
4217acd884deSSumit Saxena 		break;
4218acd884deSSumit Saxena 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
4219acd884deSSumit Saxena 		wc->opcode = IB_WC_LOCAL_INV;
4220acd884deSSumit Saxena 		break;
4221acd884deSSumit Saxena 	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
4222acd884deSSumit Saxena 		wc->opcode = IB_WC_REG_MR;
4223acd884deSSumit Saxena 		break;
4224acd884deSSumit Saxena 	default:
4225acd884deSSumit Saxena 		wc->opcode = IB_WC_SEND;
4226acd884deSSumit Saxena 		break;
4227acd884deSSumit Saxena 	}
4228acd884deSSumit Saxena 
4229acd884deSSumit Saxena 	wc->status = __req_to_ib_wc_status(cqe->status);
4230acd884deSSumit Saxena }
4231acd884deSSumit Saxena 
/*
 * bnxt_re_check_packet_type - classify a raw QP1 receive from its CQE flags.
 * @raweth_qp1_flags:  CQE flags word; bits 9:6 carry the itype
 * @raweth_qp1_flags2: CQE flags2 word; checksum-calc and ip-type bits
 *
 * Returns BNXT_RE_ROCE_V1_PACKET, BNXT_RE_ROCEV2_IPV4_PACKET or
 * BNXT_RE_ROCEV2_IPV6_PACKET, or -1 if the frame is not RoCE at all.
 */
static int bnxt_re_check_packet_type(u16 raweth_qp1_flags, u16 raweth_qp1_flags2)
{
	/* raweth_qp1_flags Bit 9-6 indicates itype; must say RoCE. */
	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
		return -1;

	/* RoCEv2 frames have both IP and L4 checksums computed by HW;
	 * if either is missing this is a RoCEv1 (L2) frame.
	 */
	if (!(raweth_qp1_flags2 &
	      CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC) ||
	    !(raweth_qp1_flags2 &
	      CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC))
		return BNXT_RE_ROCE_V1_PACKET;

	/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
	return (raweth_qp1_flags2 &
		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
		BNXT_RE_ROCEV2_IPV6_PACKET : BNXT_RE_ROCEV2_IPV4_PACKET;
}
4257acd884deSSumit Saxena 
/*
 * bnxt_re_is_loopback_packet - decide whether a QP1 receive is a local
 * loopback RoCEv1 frame.
 * @rdev:       device the packet arrived on
 * @rq_hdr_buf: start of the received header buffer
 *
 * Returns true when the destination MAC differs from the interface MAC
 * and the ethertype (read 4 bytes into the buffer) is the RoCEv1
 * ethertype.  NOTE(review): the +4 offset assumes a 4-byte internal
 * header precedes the Ethernet header on loopback frames (see the
 * caller's comment) — confirm against the hardware buffer layout.
 */
static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
					    void *rq_hdr_buf)
{
	u8 *tmp_buf = NULL;
	struct ethhdr *eth_hdr;
	u16 eth_type;
	bool rc = false;

	tmp_buf = (u8 *)rq_hdr_buf;
	/*
	 * If dest mac is not same as I/F mac, this could be a
	 * loopback address or multicast address, check whether
	 * it is a loopback packet
	 */
	if (!ether_addr_equal(tmp_buf, rdev->dev_addr)) {
		tmp_buf += 4;
		/* Check the  ether type */
		eth_hdr = (struct ethhdr *)tmp_buf;
		eth_type = ntohs(eth_hdr->h_proto);
		switch (eth_type) {
		case BNXT_QPLIB_ETHTYPE_ROCEV1:
			rc = true;
			break;
		default:
			break;
		}
	}

	return rc;
}
4288acd884deSSumit Saxena 
/*
 * bnxt_re_is_vlan_in_packet - detect an 802.1Q tag in a QP1 receive and
 * mirror it into the CQE metadata.
 * @rdev:       device the packet arrived on (unused here)
 * @rq_hdr_buf: start of the received header buffer (Ethernet header first)
 * @cqe:        completion entry to update when a VLAN tag is present
 *
 * If the outer ethertype is 802.1Q, stores TCI plus TPID (shifted into
 * the TPID field position) in cqe->raweth_qp1_metadata, sets the
 * META_FORMAT_VLAN bit in cqe->raweth_qp1_flags2, and returns true.
 * Returns false (CQE untouched) otherwise.
 */
static bool bnxt_re_is_vlan_in_packet(struct bnxt_re_dev *rdev,
				      void *rq_hdr_buf,
				      struct bnxt_qplib_cqe *cqe)
{
	struct vlan_hdr *vlan_hdr;
	struct ethhdr *eth_hdr;
	u8 *tmp_buf = NULL;
	u16 eth_type;

	tmp_buf = (u8 *)rq_hdr_buf;
	/* Check the  ether type */
	eth_hdr = (struct ethhdr *)tmp_buf;
	eth_type = ntohs(eth_hdr->h_proto);
	if (eth_type == ETH_P_8021Q) {
		/* VLAN header immediately follows the Ethernet header. */
		tmp_buf += sizeof(struct ethhdr);
		vlan_hdr = (struct vlan_hdr *)tmp_buf;
		cqe->raweth_qp1_metadata =
			ntohs(vlan_hdr->h_vlan_TCI) |
			(eth_type <<
			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
		cqe->raweth_qp1_flags2 |=
			CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN;
		return true;
	}

	return false;
}
4316acd884deSSumit Saxena 
/*
 * bnxt_re_process_raw_qp_packet_receive - relay a raw QP1 (GSI) receive
 * through the shadow QP.
 * @gsi_qp: the raw-ethernet GSI QP the packet arrived on
 * @cqe:    completion entry for the receive; cqe->wr_id indexes the
 *          header-buffer / sqp_tbl slot
 *
 * Classifies the packet (RoCEv1/v2-IPv4/v2-IPv6), saves the original CQE
 * in the per-slot sqp_tbl entry, reposts a receive buffer on the shadow
 * QP, and then sends the payload (minus L2 headers) to the shadow QP so
 * it is re-delivered as a normal UD completion.
 *
 * Returns 0 on success, -EINVAL for a non-RoCE frame, -ENOMEM if the
 * shadow RQ post fails, or the shadow-QP send status.
 */
static int bnxt_re_process_raw_qp_packet_receive(struct bnxt_re_qp *gsi_qp,
						 struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	struct bnxt_qplib_hdrbuf *hdr_buf;
	dma_addr_t shrq_hdr_buf_map;
	struct ib_sge s_sge[2] = {};
	struct ib_sge r_sge[2] = {};
	struct ib_recv_wr rwr = {};
	struct bnxt_re_ah *gsi_sah;
	struct bnxt_re_qp *gsi_sqp;
	dma_addr_t rq_hdr_buf_map;
	struct bnxt_re_dev *rdev;
	struct ib_send_wr *swr;
	u32 skip_bytes = 0;
	void *rq_hdr_buf;
	int pkt_type = 0;
	u32 offset = 0;
	u32 tbl_idx;
	int rc;
	struct ib_ud_wr udwr = {};

	swr = &udwr.wr;
	rdev = gsi_qp->rdev;
	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
	tbl_idx = cqe->wr_id;

	/* Locate this receive's slot in the QP1 header buffer array. */
	hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf;
	rq_hdr_buf = (u8 *) hdr_buf->va + tbl_idx * hdr_buf->step;
	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
							  tbl_idx);
	/* Shadow QP header buffer */
	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
							    tbl_idx);
	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];

	/* Find packet type from the cqe */
	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
					     cqe->raweth_qp1_flags2);
	if (pkt_type < 0) {
		dev_err(rdev_to_dev(rdev), "Not handling this packet\n");
		return -EINVAL;
	}

	/* Adjust the offset for the user buffer and post in the rq */

	/* RoCEv2 IPv4 GRH is 20 bytes shorter than the IPv6 form. */
	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
		offset = 20;

	/*
	 * QP1 loopback packet has 4 bytes of internal header before
	 * ether header. Skip these four bytes.
	 */
	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
		skip_bytes = 4;

	/* A VLAN tag adds VLAN_HLEN before the payload; record it in cqe. */
	if (bnxt_re_is_vlan_in_packet(rdev, rq_hdr_buf, cqe))
		skip_bytes += VLAN_HLEN;

	/* Store this cqe */
	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
	sqp_entry->qp1_qp = gsi_qp;

	/* First send SGE . Skip the ether header*/
	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
			+ skip_bytes;
	s_sge[0].lkey = 0xFFFFFFFF;
	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;

	/* Second Send SGE */
	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
	/* RoCEv2 carries an extra 8 bytes (UDP header) before the BTH. */
	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
		s_sge[1].addr += 8;
	s_sge[1].lkey = 0xFFFFFFFF;
	s_sge[1].length = 256;

	/* First recv SGE */
	r_sge[0].addr = shrq_hdr_buf_map;
	r_sge[0].lkey = 0xFFFFFFFF;
	r_sge[0].length = 40;

	r_sge[1].addr = sqp_entry->sge.addr + offset;
	r_sge[1].lkey = sqp_entry->sge.lkey;
	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;

	/* Create receive work request */
	rwr.num_sge = 2;
	rwr.sg_list = r_sge;
	rwr.wr_id = tbl_idx;
	rwr.next = NULL;

	/* Replenish the shadow RQ before sending, so the relayed packet
	 * has a buffer to land in.
	 */
	rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to post Rx buffers to shadow QP\n");
		return -ENOMEM;
	}

	swr->num_sge = 2;
	swr->sg_list = s_sge;
	swr->wr_id = tbl_idx;
	swr->opcode = IB_WR_SEND;
	swr->next = NULL;

	gsi_sah = rdev->gsi_ctx.gsi_sah;
	udwr.ah = &gsi_sah->ibah;
	udwr.remote_qpn = gsi_sqp->qplib_qp.id;
	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
	/* post data received in the send queue */
	rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);

	return rc;
}
4432acd884deSSumit Saxena 
/*
 * bnxt_re_process_res_rawqp1_wc - fill the generic fields of an ib_wc for
 * a raw QP1 receive completion: RECV opcode, translated status, and the
 * GRH flag (QP1 receives always carry the 40-byte GRH area).
 */
static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
					  struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
	wc->wc_flags |= IB_WC_GRH;
}
4440acd884deSSumit Saxena 
/*
 * bnxt_re_process_res_rc_wc - fill an ib_wc for an RC responder (receive)
 * completion.
 *
 * Sets the RECV opcode and translated status, propagates immediate/
 * invalidate flags, and upgrades the opcode to RECV_RDMA_WITH_IMM when
 * the CQE marks the data as arriving via RDMA WRITE with immediate.
 */
static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	/* RDMA + IMM together means the payload came via RDMA WRITE w/ IMM. */
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
4455acd884deSSumit Saxena 
4456acd884deSSumit Saxena /* Returns TRUE if pkt has valid VLAN and if VLAN id is non-zero */
bnxt_re_is_nonzero_vlanid_pkt(struct bnxt_qplib_cqe * orig_cqe,u16 * vid,u8 * sl)4457acd884deSSumit Saxena static bool bnxt_re_is_nonzero_vlanid_pkt(struct bnxt_qplib_cqe *orig_cqe,
4458acd884deSSumit Saxena 					  u16 *vid, u8 *sl)
4459acd884deSSumit Saxena {
4460acd884deSSumit Saxena 	u32 metadata;
4461acd884deSSumit Saxena 	u16 tpid;
4462acd884deSSumit Saxena 	bool ret = false;
4463acd884deSSumit Saxena 	metadata = orig_cqe->raweth_qp1_metadata;
4464acd884deSSumit Saxena 	if (orig_cqe->raweth_qp1_flags2 &
4465acd884deSSumit Saxena 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
4466acd884deSSumit Saxena 		tpid = ((metadata &
4467acd884deSSumit Saxena 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
4468acd884deSSumit Saxena 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
4469acd884deSSumit Saxena 		if (tpid == ETH_P_8021Q) {
4470acd884deSSumit Saxena 			*vid = metadata &
4471acd884deSSumit Saxena 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
4472acd884deSSumit Saxena 			*sl = (metadata &
4473acd884deSSumit Saxena 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
4474acd884deSSumit Saxena 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
4475acd884deSSumit Saxena 			ret = !!(*vid);
4476acd884deSSumit Saxena 		}
4477acd884deSSumit Saxena 	}
4478acd884deSSumit Saxena 
4479acd884deSSumit Saxena 	return ret;
4480acd884deSSumit Saxena }
4481acd884deSSumit Saxena 
/*
 * bnxt_re_process_res_shadow_qp_wc - build the consumer-visible ib_wc for
 * a completion arriving on the shadow GSI QP.
 * @gsi_sqp: the shadow QP the completion arrived on
 * @wc:      work completion to populate
 * @cqe:     shadow-QP completion; cqe->wr_id indexes sqp_tbl
 *
 * The real packet metadata was captured in sqp_tbl when the original raw
 * QP1 receive was relayed (see bnxt_re_process_raw_qp_packet_receive());
 * this function reports that original CQE's fields — length, source QP,
 * SMAC, VLAN, status — against the original QP1, not the shadow QP.
 */
static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
					     struct ib_wc *wc,
					     struct bnxt_qplib_cqe *cqe)
{
	u32 tbl_idx;
	struct bnxt_re_dev *rdev = gsi_sqp->rdev;
	struct bnxt_re_qp *gsi_qp = NULL;
	struct bnxt_qplib_cqe *orig_cqe = NULL;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	int nw_type;
	u16 vlan_id;
	u8 sl;

	tbl_idx = cqe->wr_id;

	/* Recover the original receive's context saved during the relay. */
	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
	gsi_qp = sqp_entry->qp1_qp;
	orig_cqe = &sqp_entry->cqe;

	wc->wr_id = sqp_entry->wrid;
	wc->byte_len = orig_cqe->length;
	/* Report against the original QP1, not the shadow QP. */
	wc->qp = &gsi_qp->ib_qp;

	wc->ex.imm_data = orig_cqe->immdata;
	wc->src_qp = orig_cqe->src_qp;
	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
	if (bnxt_re_is_nonzero_vlanid_pkt(orig_cqe, &vlan_id, &sl)) {
		if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
			wc->sl = sl;
			wc->vlan_id = vlan_id;
			wc->wc_flags |= IB_WC_WITH_VLAN;
		}
	}
	wc->port_num = 1;
	wc->vendor_err = orig_cqe->status;

	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
	wc->wc_flags |= IB_WC_GRH;

	/* Classification result is only logged here, not reported. */
	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
					    orig_cqe->raweth_qp1_flags2);
	if(nw_type >= 0)
		dev_dbg(rdev_to_dev(rdev), "%s nw_type = %d\n", __func__, nw_type);
}
4527acd884deSSumit Saxena 
/*
 * bnxt_re_process_res_ud_wc - fill an ib_wc for a UD receive completion.
 * @rdev: owning device
 * @qp:   QP the completion belongs to
 * @wc:   work completion to populate
 * @cqe:  QPLIB completion entry
 *
 * For the GSI QP in UD mode, additionally reports GRH/SMAC and, when the
 * CQE metadata carries a non-zero VLAN id, the VLAN fields.  CQE-v2
 * capable devices encode VLAN presence differently (META_FORMAT mask +
 * META1_VALID) than older ones (META_FORMAT_VLAN flag).
 */
static void bnxt_re_process_res_ud_wc(struct bnxt_re_dev *rdev,
				      struct bnxt_re_qp *qp, struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	u16 vlan_id = 0;

	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);
	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	/* NOTE(review): tests the RC flag constant (CQ_RES_RC_FLAGS_INV) in
	 * a UD completion — presumably the bit positions coincide; confirm
	 * against the CQ_RES_UD flag definitions.
	 */
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	/* report only on GSI QP for Thor */
	if (rdev->gsi_ctx.gsi_qp->qplib_qp.id == qp->qplib_qp.id &&
	    rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD) {
		wc->wc_flags |= IB_WC_GRH;
		memcpy(wc->smac, cqe->smac, ETH_ALEN);
		wc->wc_flags |= IB_WC_WITH_SMAC;
		if (_is_cqe_v2_supported(rdev->dev_attr->dev_cap_flags)) {
			if (cqe->flags & CQ_RES_UD_V2_FLAGS_META_FORMAT_MASK) {
				if (cqe->cfa_meta &
				    BNXT_QPLIB_CQE_CFA_META1_VALID)
					vlan_id = (cqe->cfa_meta & 0xFFF);
			}
		} else if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
			vlan_id = (cqe->cfa_meta & 0xFFF);
		}
		/* Mark only if vlan_id is non zero */
		if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
			wc->vlan_id = vlan_id;
			wc->wc_flags |= IB_WC_WITH_VLAN;
		}
	}
}
4562acd884deSSumit Saxena 
/*
 * bnxt_re_legacy_send_phantom_wqe - post a fence-MW bind WQE on the SQ to
 * generate a "phantom" completion.
 * @qp: QP whose send queue receives the phantom WQE
 *
 * Takes the SQ lock for the duration of the post; on success bumps the
 * per-SQ phantom_wqe_cnt accounting and logs the producer state.
 *
 * Returns the status of bnxt_re_legacy_bind_fence_mw().
 */
static int bnxt_re_legacy_send_phantom_wqe(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	rc = bnxt_re_legacy_bind_fence_mw(lib_qp);
	if (!rc) {
		lib_qp->sq.phantom_wqe_cnt++;
		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
			lib_qp->id, lib_qp->sq.hwq.prod,
			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
			lib_qp->sq.phantom_wqe_cnt);
	}

	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}
4584acd884deSSumit Saxena 
/*
 * bnxt_re_poll_cq - ib_poll_cq hook: harvest up to @num_entries work
 * completions from @ib_cq into the caller's @wc array.
 *
 * Returns the number of entries written to @wc (0..num_entries, further
 * capped by cq->max_cql). Runs under cq->cq_lock with IRQs saved.
 * Two special CQs short-circuit before any polling: the DB-recovery soft
 * CQ only forwards a doorbell pacing alert, and a user (umem-backed) CQ
 * only completes a pending resize.
 */
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
	struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
	struct bnxt_re_dev *rdev = cq->rdev;
	struct bnxt_re_qp *qp;
	struct bnxt_qplib_cqe *cqe;
	int i, ncqe, budget, init_budget;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_qp *lib_qp;
	u32 tbl_idx;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	unsigned long flags;
	u8 gsi_mode;

	/*
	 * DB recovery CQ; only process the door bell pacing alert from
	 * the user lib
	 */
	if (cq->is_dbr_soft_cq) {
		bnxt_re_pacing_alert(rdev);
		return 0;
	}

	/* User CQ; the only processing we do is to
	 * complete any pending CQ resize operation.
	 */
	if (cq->umem) {
		if (cq->resize_umem)
			bnxt_re_resize_cq_complete(cq);
		return 0;
	}

	spin_lock_irqsave(&cq->cq_lock, flags);

	/* Budget is bounded by both the caller and the CQE scratch array. */
	budget = min_t(u32, num_entries, cq->max_cql);
	init_budget = budget;
	if (!cq->cql) {
		dev_err(rdev_to_dev(rdev), "POLL CQ no CQL to use\n");
		goto exit;
	}
	cqe = &cq->cql[0];
	gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
	while (budget) {
		lib_qp = NULL;
		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
		if (lib_qp) {
			/* The poller flagged this SQ as needing a phantom
			 * (fence) WQE; retry later if posting hit -ENOMEM.
			 */
			sq = &lib_qp->sq;
			if (sq->legacy_send_phantom == true) {
				qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp);
				if (bnxt_re_legacy_send_phantom_wqe(qp) == -ENOMEM)
					dev_err(rdev_to_dev(rdev),
						"Phantom failed! Scheduled to send again\n");
				else
					sq->legacy_send_phantom = false;
			}
		}
		/* Top up the batch with flush-list completions from QPs
		 * that moved to the error state.
		 */
		if (ncqe < budget)
			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
							      cqe + ncqe,
							      budget - ncqe);

		if (!ncqe)
			break;

		for (i = 0; i < ncqe; i++, cqe++) {
			/* Transcribe each qplib_wqe back to ib_wc */
			memset(wc, 0, sizeof(*wc));

			wc->wr_id = cqe->wr_id;
			wc->byte_len = cqe->length;
			qp = to_bnxt_re((struct bnxt_qplib_qp *)cqe->qp_handle,
					struct bnxt_re_qp, qplib_qp);
			if (!qp) {
				dev_err(rdev_to_dev(rdev),
					"POLL CQ bad QP handle\n");
				continue;
			}
			wc->qp = &qp->ib_qp;
			wc->ex.imm_data = cqe->immdata;
			wc->src_qp = cqe->src_qp;
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->port_num = 1;
			wc->vendor_err = cqe->status;

			switch(cqe->opcode) {
			case CQ_BASE_CQE_TYPE_REQ:
				/* Sends on the shadow GSI QP are reported
				 * through the stored entry instead.
				 */
				if (gsi_mode == BNXT_RE_GSI_MODE_ALL &&
				    qp->qplib_qp.id ==
				    rdev->gsi_ctx.gsi_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion */
					 dev_dbg(rdev_to_dev(rdev),
						 "Skipping this UD Send CQ\n");
					memset(wc, 0, sizeof(*wc));
					continue;
				}
				bnxt_re_process_req_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
				if (gsi_mode == BNXT_RE_GSI_MODE_ALL) {
					if (!cqe->status) {
						int rc = 0;
						/* Loop the raw QP1 packet back
						 * through the shadow QP; its WC
						 * is reported on a later poll.
						 */
						rc = bnxt_re_process_raw_qp_packet_receive(qp, cqe);
						if (!rc) {
							memset(wc, 0,
							       sizeof(*wc));
							continue;
						}
						cqe->status = -1;
					}
					/* Errors need not be looped back.
					 * But change the wr_id to the one
					 * stored in the table
					 */
					tbl_idx = cqe->wr_id;
					sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
					wc->wr_id = sqp_entry->wrid;
				}

				bnxt_re_process_res_rawqp1_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RC:
				bnxt_re_process_res_rc_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_UD:
				if (gsi_mode == BNXT_RE_GSI_MODE_ALL &&
				    qp->qplib_qp.id ==
				    rdev->gsi_ctx.gsi_sqp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					dev_dbg(rdev_to_dev(rdev),
						"Handling the UD receive CQ\n");
					if (cqe->status) {
						/* TODO handle this completion as a
						 * failure in the loopback procedure
						 */
						continue;
					} else {
						bnxt_re_process_res_shadow_qp_wc(qp, wc, cqe);
						break;
					}
				}
				bnxt_re_process_res_ud_wc(rdev, qp, wc, cqe);
				break;
			default:
				dev_err(rdev_to_dev(cq->rdev),
					"POLL CQ type 0x%x not handled, skip!\n",
					cqe->opcode);
				continue;
			}
			wc++;
			budget--;
		}
	}
exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	/* Number of WC entries actually produced. */
	return init_budget - budget;
}
4744acd884deSSumit Saxena 
bnxt_re_req_notify_cq(struct ib_cq * ib_cq,enum ib_cq_notify_flags ib_cqn_flags)4745acd884deSSumit Saxena int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
4746acd884deSSumit Saxena 			  enum ib_cq_notify_flags ib_cqn_flags)
4747acd884deSSumit Saxena {
4748acd884deSSumit Saxena 	struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
4749acd884deSSumit Saxena 	int type = 0, rc = 0;
4750acd884deSSumit Saxena 	unsigned long flags;
4751acd884deSSumit Saxena 
4752acd884deSSumit Saxena 	spin_lock_irqsave(&cq->cq_lock, flags);
4753acd884deSSumit Saxena 	/* Trigger on the very next completion */
4754acd884deSSumit Saxena 	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
4755acd884deSSumit Saxena 		type = DBC_DBC_TYPE_CQ_ARMALL;
4756acd884deSSumit Saxena 	/* Trigger on the next solicited completion */
4757acd884deSSumit Saxena 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
4758acd884deSSumit Saxena 		type = DBC_DBC_TYPE_CQ_ARMSE;
4759acd884deSSumit Saxena 
4760acd884deSSumit Saxena 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
4761acd884deSSumit Saxena 
4762acd884deSSumit Saxena 	/* Poll to see if there are missed events */
4763acd884deSSumit Saxena 	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
4764acd884deSSumit Saxena 	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
4765acd884deSSumit Saxena 		rc = 1;
4766acd884deSSumit Saxena 
4767acd884deSSumit Saxena 	spin_unlock_irqrestore(&cq->cq_lock, flags);
4768acd884deSSumit Saxena 
4769acd884deSSumit Saxena 	return rc;
4770acd884deSSumit Saxena }
4771acd884deSSumit Saxena 
4772acd884deSSumit Saxena /* Memory Regions */
bnxt_re_get_dma_mr(struct ib_pd * ib_pd,int mr_access_flags)4773acd884deSSumit Saxena struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
4774acd884deSSumit Saxena {
4775acd884deSSumit Saxena 	struct bnxt_qplib_mrinfo mrinfo;
4776acd884deSSumit Saxena 	struct bnxt_re_dev *rdev;
4777acd884deSSumit Saxena 	struct bnxt_re_mr *mr;
4778acd884deSSumit Saxena 	struct bnxt_re_pd *pd;
4779acd884deSSumit Saxena 	u32 max_mr_count;
4780acd884deSSumit Saxena 	u64 pbl = 0;
4781acd884deSSumit Saxena 	int rc;
4782acd884deSSumit Saxena 
4783acd884deSSumit Saxena 	memset(&mrinfo, 0, sizeof(mrinfo));
4784acd884deSSumit Saxena 	pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
4785acd884deSSumit Saxena 	rdev = pd->rdev;
4786acd884deSSumit Saxena 
4787acd884deSSumit Saxena 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4788acd884deSSumit Saxena 	if (!mr) {
4789acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev),
4790acd884deSSumit Saxena 			"Allocate memory for DMA MR failed!\n");
4791acd884deSSumit Saxena 		return ERR_PTR(-ENOMEM);
4792acd884deSSumit Saxena 	}
4793acd884deSSumit Saxena 	mr->rdev = rdev;
4794acd884deSSumit Saxena 	mr->qplib_mr.pd = &pd->qplib_pd;
4795acd884deSSumit Saxena 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
4796acd884deSSumit Saxena 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4797acd884deSSumit Saxena 
4798acd884deSSumit Saxena 	/* Allocate and register 0 as the address */
4799acd884deSSumit Saxena 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4800acd884deSSumit Saxena 	if (rc) {
4801acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Allocate DMA MR failed!\n");
4802acd884deSSumit Saxena 		goto fail;
4803acd884deSSumit Saxena 	}
4804acd884deSSumit Saxena 	mr->qplib_mr.total_size = -1; /* Infinite length */
4805acd884deSSumit Saxena 	mrinfo.ptes = &pbl;
4806acd884deSSumit Saxena 	mrinfo.sg.npages = 0;
4807acd884deSSumit Saxena 	mrinfo.sg.pgsize = PAGE_SIZE;
4808acd884deSSumit Saxena 	mrinfo.sg.pgshft = PAGE_SHIFT;
4809acd884deSSumit Saxena 	mrinfo.sg.pgsize = PAGE_SIZE;
4810acd884deSSumit Saxena 	mrinfo.mrw = &mr->qplib_mr;
4811acd884deSSumit Saxena 	mrinfo.is_dma = true;
4812acd884deSSumit Saxena 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
4813acd884deSSumit Saxena 	if (rc) {
4814acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Register DMA MR failed!\n");
4815acd884deSSumit Saxena 		goto fail_mr;
4816acd884deSSumit Saxena 	}
4817acd884deSSumit Saxena 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4818acd884deSSumit Saxena 	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
4819acd884deSSumit Saxena 			       IB_ACCESS_REMOTE_ATOMIC))
4820acd884deSSumit Saxena 		mr->ib_mr.rkey = mr->ib_mr.lkey;
4821acd884deSSumit Saxena 	atomic_inc(&rdev->stats.rsors.mr_count);
4822acd884deSSumit Saxena 	max_mr_count =  atomic_read(&rdev->stats.rsors.mr_count);
4823acd884deSSumit Saxena 	if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
4824acd884deSSumit Saxena 		atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
4825acd884deSSumit Saxena 
4826acd884deSSumit Saxena 	return &mr->ib_mr;
4827acd884deSSumit Saxena 
4828acd884deSSumit Saxena fail_mr:
4829acd884deSSumit Saxena 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4830acd884deSSumit Saxena fail:
4831acd884deSSumit Saxena 	kfree(mr);
4832acd884deSSumit Saxena 	return ERR_PTR(rc);
4833acd884deSSumit Saxena }
4834acd884deSSumit Saxena 
bnxt_re_dereg_mr(struct ib_mr * ib_mr,struct ib_udata * udata)4835acd884deSSumit Saxena int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
4836acd884deSSumit Saxena {
4837acd884deSSumit Saxena 	struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
4838acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = mr->rdev;
4839acd884deSSumit Saxena 	int rc = 0;
4840acd884deSSumit Saxena 
4841acd884deSSumit Saxena 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4842acd884deSSumit Saxena 	if (rc)
4843acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Dereg MR failed (%d): rc - %#x\n",
4844acd884deSSumit Saxena 			mr->qplib_mr.lkey, rc);
4845acd884deSSumit Saxena 
4846acd884deSSumit Saxena 	if (mr->pages) {
4847acd884deSSumit Saxena 		bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
4848acd884deSSumit Saxena 						   &mr->qplib_frpl);
4849acd884deSSumit Saxena 		kfree(mr->pages);
4850acd884deSSumit Saxena 		mr->npages = 0;
4851acd884deSSumit Saxena 		mr->pages = NULL;
4852acd884deSSumit Saxena 	}
4853acd884deSSumit Saxena 	if (!IS_ERR(mr->ib_umem) && mr->ib_umem) {
4854acd884deSSumit Saxena 		mr->is_invalcb_active = false;
4855acd884deSSumit Saxena 		bnxt_re_peer_mem_release(mr->ib_umem);
4856acd884deSSumit Saxena 	}
4857acd884deSSumit Saxena 	kfree(mr);
4858acd884deSSumit Saxena 	atomic_dec(&rdev->stats.rsors.mr_count);
4859acd884deSSumit Saxena 	return 0;
4860acd884deSSumit Saxena }
4861acd884deSSumit Saxena 
/* Callback for ib_sg_to_pages(): append one page address to mr->pages. */
static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);

	/* Page list full: tell ib_sg_to_pages() to stop. */
	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
		return -ENOMEM;

	mr->pages[mr->npages] = addr;
	mr->npages++;
	dev_dbg(NULL, "%s: ibdev %p Set MR pages[%d] = 0x%lx\n",
		ROCE_DRV_MODULE_NAME, ib_mr->device, mr->npages - 1,
		mr->pages[mr->npages - 1]);
	return 0;
}
4875acd884deSSumit Saxena 
bnxt_re_map_mr_sg(struct ib_mr * ib_mr,struct scatterlist * sg,int sg_nents,unsigned int * sg_offset)4876acd884deSSumit Saxena int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg,
4877acd884deSSumit Saxena 		      int sg_nents, unsigned int *sg_offset)
4878acd884deSSumit Saxena {
4879acd884deSSumit Saxena 	struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
4880acd884deSSumit Saxena 
4881acd884deSSumit Saxena 	mr->npages = 0;
4882acd884deSSumit Saxena 	return ib_sg_to_pages(ib_mr, sg, sg_nents,
4883acd884deSSumit Saxena 			      sg_offset, bnxt_re_set_page);
4884acd884deSSumit Saxena }
4885acd884deSSumit Saxena 
/*
 * bnxt_re_alloc_mr - ib_alloc_mr hook: allocate a fast-register MR that
 * can map up to @max_num_sg pages via bnxt_re_map_mr_sg().
 *
 * Only IB_MR_TYPE_MEM_REG is supported and @max_num_sg is capped at
 * MAX_PBL_LVL_1_PGS. Returns the new ib_mr or an ERR_PTR on failure.
 */
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	u32 max_mr_count;
	int rc;

	dev_dbg(rdev_to_dev(rdev), "Alloc MR\n");
	if (type != IB_MR_TYPE_MEM_REG) {
		dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported\n", type);
		return ERR_PTR(-EINVAL);
	}
	if (max_num_sg > MAX_PBL_LVL_1_PGS) {
		dev_dbg(rdev_to_dev(rdev), "Max SG exceeded\n");
		return ERR_PTR(-EINVAL);
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		dev_err(rdev_to_dev(rdev), "Allocate MR mem failed!\n");
		return ERR_PTR(-ENOMEM);
	}
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Allocate MR failed!\n");
		goto fail;
	}
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->ib_mr.lkey;
	/* kcalloc() checks the max_num_sg * sizeof(u64) product for
	 * overflow, unlike open-coded kzalloc(sizeof(u64) * n).
	 */
	mr->pages = kcalloc(max_num_sg, sizeof(*mr->pages), GFP_KERNEL);
	if (!mr->pages) {
		dev_err(rdev_to_dev(rdev),
			"Allocate MR page list mem failed!\n");
		rc = -ENOMEM;
		goto fail_mr;
	}
	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Allocate HW Fast reg page list failed!\n");
		goto free_page;
	}
	dev_dbg(rdev_to_dev(rdev), "Alloc MR pages = 0x%p\n", mr->pages);

	/* Track current and high-water MR counts for stats. */
	atomic_inc(&rdev->stats.rsors.mr_count);
	max_mr_count =  atomic_read(&rdev->stats.rsors.mr_count);
	if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
		atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
	return &mr->ib_mr;

free_page:
	kfree(mr->pages);
fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
}
4951acd884deSSumit Saxena 
4952acd884deSSumit Saxena /* Memory Windows */
bnxt_re_alloc_mw(struct ib_pd * ib_pd,enum ib_mw_type type,struct ib_udata * udata)4953acd884deSSumit Saxena struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
4954acd884deSSumit Saxena 			       struct ib_udata *udata)
4955acd884deSSumit Saxena {
4956acd884deSSumit Saxena 	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
4957acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = pd->rdev;
4958acd884deSSumit Saxena 	struct bnxt_re_mw *mw;
4959acd884deSSumit Saxena 	u32 max_mw_count;
4960acd884deSSumit Saxena 	int rc;
4961acd884deSSumit Saxena 
4962acd884deSSumit Saxena 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
4963acd884deSSumit Saxena 	if (!mw) {
4964acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Allocate MW failed!\n");
4965acd884deSSumit Saxena 		rc = -ENOMEM;
4966acd884deSSumit Saxena 		goto exit;
4967acd884deSSumit Saxena 	}
4968acd884deSSumit Saxena 	mw->rdev = rdev;
4969acd884deSSumit Saxena 	mw->qplib_mw.pd = &pd->qplib_pd;
4970acd884deSSumit Saxena 
4971acd884deSSumit Saxena 	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
4972acd884deSSumit Saxena 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
4973acd884deSSumit Saxena 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
4974acd884deSSumit Saxena 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
4975acd884deSSumit Saxena 	if (rc) {
4976acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Allocate MW failed!\n");
4977acd884deSSumit Saxena 		goto fail;
4978acd884deSSumit Saxena 	}
4979acd884deSSumit Saxena 	mw->ib_mw.rkey = mw->qplib_mw.rkey;
4980acd884deSSumit Saxena 	atomic_inc(&rdev->stats.rsors.mw_count);
4981acd884deSSumit Saxena 	max_mw_count = atomic_read(&rdev->stats.rsors.mw_count);
4982acd884deSSumit Saxena 	if (max_mw_count > atomic_read(&rdev->stats.rsors.max_mw_count))
4983acd884deSSumit Saxena 		atomic_set(&rdev->stats.rsors.max_mw_count, max_mw_count);
4984acd884deSSumit Saxena 
4985acd884deSSumit Saxena 	return &mw->ib_mw;
4986acd884deSSumit Saxena fail:
4987acd884deSSumit Saxena 	kfree(mw);
4988acd884deSSumit Saxena exit:
4989acd884deSSumit Saxena 	return ERR_PTR(rc);
4990acd884deSSumit Saxena }
4991acd884deSSumit Saxena 
bnxt_re_dealloc_mw(struct ib_mw * ib_mw)4992acd884deSSumit Saxena int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
4993acd884deSSumit Saxena {
4994acd884deSSumit Saxena 	struct bnxt_re_mw *mw = to_bnxt_re(ib_mw, struct bnxt_re_mw, ib_mw);
4995acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = mw->rdev;
4996acd884deSSumit Saxena 	int rc;
4997acd884deSSumit Saxena 
4998acd884deSSumit Saxena 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
4999acd884deSSumit Saxena 	if (rc) {
5000acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
5001acd884deSSumit Saxena 		return rc;
5002acd884deSSumit Saxena 	}
5003acd884deSSumit Saxena 
5004acd884deSSumit Saxena 	kfree(mw);
5005acd884deSSumit Saxena 	atomic_dec(&rdev->stats.rsors.mw_count);
5006acd884deSSumit Saxena 	return rc;
5007acd884deSSumit Saxena }
5008acd884deSSumit Saxena 
bnxt_re_page_size_ok(int page_shift)5009acd884deSSumit Saxena static int bnxt_re_page_size_ok(int page_shift)
5010acd884deSSumit Saxena {
5011acd884deSSumit Saxena 	switch (page_shift) {
5012acd884deSSumit Saxena 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
5013acd884deSSumit Saxena 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
5014acd884deSSumit Saxena 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
5015acd884deSSumit Saxena 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
5016acd884deSSumit Saxena 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
5017acd884deSSumit Saxena 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
5018acd884deSSumit Saxena 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
5019acd884deSSumit Saxena 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256MB:
5020acd884deSSumit Saxena 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
5021acd884deSSumit Saxena 		return 1;
5022acd884deSSumit Saxena 	default:
5023acd884deSSumit Saxena 		return 0;
5024acd884deSSumit Saxena 	}
5025acd884deSSumit Saxena }
5026acd884deSSumit Saxena 
/* Pick the page shift used to build the MR's PBL. Only the umem's native
 * page size is used today; va/st/cmask are kept for interface
 * compatibility but unused.
 */
static int bnxt_re_get_page_shift(struct ib_umem *umem,
				  u64 va, u64 st, u64 cmask)
{
	return ilog2(umem->page_size);
}
5036acd884deSSumit Saxena 
/* Count the pages of size (1 << @page_shift) needed to cover the
 * registered range [start, start + length).
 */
static int bnxt_re_get_num_pages(struct ib_umem *umem, u64 start, u64 length, int page_shift)
{
	int count;

	/* Native page size: the umem layer already knows the page count. */
	if (page_shift == PAGE_SHIFT)
		return ib_umem_num_pages_compat(umem);

	/* Round the length up to whole pages, plus one extra page when the
	 * range does not begin on a page boundary.
	 */
	count = ALIGN(length, BIT(page_shift)) / BIT(page_shift);
	if (start % BIT(page_shift))
		count++;
	return count;
}
5050acd884deSSumit Saxena 
5051acd884deSSumit Saxena /* uverbs */
/*
 * bnxt_re_reg_user_mr - ib_reg_user_mr hook: pin the user VA range
 * [start, start + length) and register it as an MR on @ib_pd.
 *
 * Returns the new ib_mr or an ERR_PTR. The ordering matters: on the
 * non-unified path the MRW is allocated before the umem is pinned, and
 * each error label below unwinds exactly what was set up before it.
 */
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_mrinfo mrinfo;
	int umem_pgs, page_shift, rc;
	struct bnxt_re_mr *mr;
	struct ib_umem *umem;
	u32 max_mr_count;
	int npages;

	dev_dbg(rdev_to_dev(rdev), "Reg user MR\n");

	/* Refuse registration once the device's MR/MW quota is exhausted. */
	if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr)
		return ERR_PTR(-ENOMEM);

	/* Driver is unloading; do not create new resources. */
	if (rdev->mod_exit) {
		dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
		return ERR_PTR(-EIO);
	}
	memset(&mrinfo, 0, sizeof(mrinfo));
	if (length > BNXT_RE_MAX_MR_SIZE) {
		dev_err(rdev_to_dev(rdev), "Requested MR Size: %lu "
			"> Max supported: %ld\n", length, BNXT_RE_MAX_MR_SIZE);
		return ERR_PTR(-ENOMEM);
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		dev_err(rdev_to_dev(rdev), "Allocate MR failed!\n");
		return ERR_PTR (-ENOMEM);
	}
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;

	/* Non-unified devices need the MRW allocated before registration. */
	if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) {
		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Alloc MR failed!\n");
			goto fail;
		}
		/* The fixed portion of the rkey is the same as the lkey */
		mr->ib_mr.rkey = mr->qplib_mr.rkey;
	}

	/* Pin the user pages backing [start, start + length). */
	umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context,
					udata, start, length,
					mr_access_flags, 0);
	if (IS_ERR(umem)) {
		rc = PTR_ERR(umem);
		dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n",
			__func__, rc);
		goto free_mr;
	}
	mr->ib_umem = umem;

	mr->qplib_mr.va = virt_addr;
	umem_pgs = ib_umem_num_pages_compat(umem);
	if (!umem_pgs) {
		dev_err(rdev_to_dev(rdev), "umem is invalid!\n");
		rc = -EINVAL;
		goto free_umem;
	}
	mr->qplib_mr.total_size = length;
	page_shift = bnxt_re_get_page_shift(umem, virt_addr, start,
					    rdev->dev_attr->page_size_cap);
	if (!bnxt_re_page_size_ok(page_shift)) {
		dev_err(rdev_to_dev(rdev), "umem page size unsupported!\n");
		rc = -EFAULT;
		goto free_umem;
	}
	npages = bnxt_re_get_num_pages(umem, start, length, page_shift);

	/* Map umem buf ptrs to the PBL */
	mrinfo.sg.npages = npages;
	mrinfo.sg.sghead = get_ib_umem_sgl(umem, &mrinfo.sg.nmap);
	mrinfo.sg.pgshft = page_shift;
	mrinfo.sg.pgsize = BIT(page_shift);

	mrinfo.mrw = &mr->qplib_mr;

	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Reg user MR failed!\n");
		goto free_umem;
	}

	mr->ib_mr.lkey = mr->ib_mr.rkey = mr->qplib_mr.lkey;
	/* Track current and high-water MR counts for stats. */
	atomic_inc(&rdev->stats.rsors.mr_count);
	max_mr_count =  atomic_read(&rdev->stats.rsors.mr_count);
	if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
		atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);

	return &mr->ib_mr;

free_umem:
	bnxt_re_peer_mem_release(mr->ib_umem);
free_mr:
	if (!_is_alloc_mr_unified(rdev->qplib_res.dattr))
		bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
}
5159acd884deSSumit Saxena 
5160acd884deSSumit Saxena int
bnxt_re_rereg_user_mr(struct ib_mr * ib_mr,int flags,u64 start,u64 length,u64 virt_addr,int mr_access_flags,struct ib_pd * ib_pd,struct ib_udata * udata)5161acd884deSSumit Saxena bnxt_re_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 length,
5162acd884deSSumit Saxena 		      u64 virt_addr, int mr_access_flags,
5163acd884deSSumit Saxena 		      struct ib_pd *ib_pd, struct ib_udata *udata)
5164acd884deSSumit Saxena {
5165acd884deSSumit Saxena 	struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
5166acd884deSSumit Saxena 	struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
5167acd884deSSumit Saxena 	int umem_pgs = 0, page_shift = PAGE_SHIFT, rc;
5168acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = mr->rdev;
5169acd884deSSumit Saxena 	struct bnxt_qplib_mrinfo mrinfo;
5170acd884deSSumit Saxena 	struct ib_umem *umem;
5171acd884deSSumit Saxena 	u32 npages;
5172acd884deSSumit Saxena 
5173acd884deSSumit Saxena 	/* TODO: Must decipher what to modify based on the flags */
5174acd884deSSumit Saxena 	memset(&mrinfo, 0, sizeof(mrinfo));
5175acd884deSSumit Saxena 	if (flags & IB_MR_REREG_TRANS) {
5176acd884deSSumit Saxena 		umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context,
5177acd884deSSumit Saxena 						udata, start, length,
5178acd884deSSumit Saxena 						mr_access_flags, 0);
5179acd884deSSumit Saxena 		if (IS_ERR(umem)) {
5180acd884deSSumit Saxena 			rc = PTR_ERR(umem);
5181acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev),
5182acd884deSSumit Saxena 				"%s: ib_umem_get failed! ret =  %d\n",
5183acd884deSSumit Saxena 				__func__, rc);
5184acd884deSSumit Saxena 			goto fail;
5185acd884deSSumit Saxena 		}
5186acd884deSSumit Saxena 		mr->ib_umem = umem;
5187acd884deSSumit Saxena 
5188acd884deSSumit Saxena 		mr->qplib_mr.va = virt_addr;
5189acd884deSSumit Saxena 		umem_pgs = ib_umem_num_pages_compat(umem);
5190acd884deSSumit Saxena 		if (!umem_pgs) {
5191acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev), "umem is invalid!\n");
5192acd884deSSumit Saxena 			rc = -EINVAL;
5193acd884deSSumit Saxena 			goto fail_free_umem;
5194acd884deSSumit Saxena 		}
5195acd884deSSumit Saxena 		mr->qplib_mr.total_size = length;
5196acd884deSSumit Saxena 		page_shift = bnxt_re_get_page_shift(umem, virt_addr, start,
5197acd884deSSumit Saxena 					    rdev->dev_attr->page_size_cap);
5198acd884deSSumit Saxena 		if (!bnxt_re_page_size_ok(page_shift)) {
5199acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev),
5200acd884deSSumit Saxena 				"umem page size unsupported!\n");
5201acd884deSSumit Saxena 			rc = -EFAULT;
5202acd884deSSumit Saxena 			goto fail_free_umem;
5203acd884deSSumit Saxena 		}
5204acd884deSSumit Saxena 		npages = bnxt_re_get_num_pages(umem, start, length, page_shift);
5205acd884deSSumit Saxena 		/* Map umem buf ptrs to the PBL */
5206acd884deSSumit Saxena 		mrinfo.sg.npages = npages;
5207acd884deSSumit Saxena 		mrinfo.sg.sghead = get_ib_umem_sgl(umem, &mrinfo.sg.nmap);
5208acd884deSSumit Saxena 		mrinfo.sg.pgshft = page_shift;
5209acd884deSSumit Saxena 		mrinfo.sg.pgsize = BIT(page_shift);
5210acd884deSSumit Saxena 	}
5211acd884deSSumit Saxena 
5212acd884deSSumit Saxena 	mrinfo.mrw = &mr->qplib_mr;
5213acd884deSSumit Saxena 	if (flags & IB_MR_REREG_PD)
5214acd884deSSumit Saxena 		mr->qplib_mr.pd = &pd->qplib_pd;
5215acd884deSSumit Saxena 
5216acd884deSSumit Saxena 	if (flags & IB_MR_REREG_ACCESS)
5217acd884deSSumit Saxena 		mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
5218acd884deSSumit Saxena 
5219acd884deSSumit Saxena 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
5220acd884deSSumit Saxena 	if (rc) {
5221acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "Rereg user MR failed!\n");
5222acd884deSSumit Saxena 		goto fail_free_umem;
5223acd884deSSumit Saxena 	}
5224acd884deSSumit Saxena 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
5225acd884deSSumit Saxena 
5226acd884deSSumit Saxena 	return 0;
5227acd884deSSumit Saxena 
5228acd884deSSumit Saxena fail_free_umem:
5229acd884deSSumit Saxena 	bnxt_re_peer_mem_release(mr->ib_umem);
5230acd884deSSumit Saxena fail:
5231acd884deSSumit Saxena 	return rc;
5232acd884deSSumit Saxena }
5233acd884deSSumit Saxena 
bnxt_re_check_abi_version(struct bnxt_re_dev * rdev)5234acd884deSSumit Saxena static int bnxt_re_check_abi_version(struct bnxt_re_dev *rdev)
5235acd884deSSumit Saxena {
5236acd884deSSumit Saxena 	struct ib_device *ibdev = &rdev->ibdev;
5237acd884deSSumit Saxena 	u32 uverbs_abi_ver;
5238acd884deSSumit Saxena 
5239acd884deSSumit Saxena 	uverbs_abi_ver = GET_UVERBS_ABI_VERSION(ibdev);
5240acd884deSSumit Saxena 	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d\n",
5241acd884deSSumit Saxena 		uverbs_abi_ver);
5242acd884deSSumit Saxena 	if (uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
5243acd884deSSumit Saxena 		dev_dbg(rdev_to_dev(rdev), " is different from the device %d \n",
5244acd884deSSumit Saxena 			BNXT_RE_ABI_VERSION);
5245acd884deSSumit Saxena 		return -EPERM;
5246acd884deSSumit Saxena 	}
5247acd884deSSumit Saxena 	return 0;
5248acd884deSSumit Saxena }
5249acd884deSSumit Saxena 
/*
 * Allocate and initialize a user context for an application opening the
 * device. Checks the library's ABI version, allocates the shared page,
 * and negotiates capabilities with userspace through the
 * bnxt_re_uctx_req/bnxt_re_uctx_resp structures exchanged via @udata.
 *
 * Returns 0 on success or a negative errno; on failure after the shared
 * page was allocated it is freed and uctx->shpg reset to NULL.
 */
int bnxt_re_alloc_ucontext(struct ib_ucontext *uctx_in,
			   struct ib_udata *udata)
{
	struct ib_ucontext *ctx = uctx_in;
	struct ib_device *ibdev = ctx->device;
	struct bnxt_re_ucontext *uctx =
		container_of(ctx, struct bnxt_re_ucontext, ibucontext);

	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
	struct bnxt_re_uctx_resp resp = {};
	struct bnxt_re_uctx_req ureq = {};
	struct bnxt_qplib_chip_ctx *cctx;
	u32 chip_met_rev_num;
	bool genp5 = false;
	int rc;

	cctx = rdev->chip_ctx;
	/* Reject userspace libraries built against a different ABI */
	rc = bnxt_re_check_abi_version(rdev);
	if (rc)
		goto fail;

	uctx->rdev = rdev;
	/* One page shared between driver and application */
	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		dev_err(rdev_to_dev(rdev), "shared memory allocation failed!\n");
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);
	if (BNXT_RE_ABI_VERSION >= 4) {
		/* Pack chip number, revision and metal into chip_id0 */
		chip_met_rev_num = cctx->chip_num;
		chip_met_rev_num |= ((u32)cctx->chip_rev & 0xFF) <<
				     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
		chip_met_rev_num |= ((u32)cctx->chip_metal & 0xFF) <<
				     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
		resp.chip_id0 = chip_met_rev_num;
		resp.chip_id1 = 0; /* future extension of chip info */
	}

	if (BNXT_RE_ABI_VERSION != 4) {
		/*Temp, Use idr_alloc instead*/
		resp.dev_id = rdev->en_dev->pdev->devfn;
		resp.max_qp = rdev->qplib_res.hctx->qp_ctx.max;
	}

	genp5 = _is_chip_gen_p5_p7(cctx);
	if (BNXT_RE_ABI_VERSION > 5) {
		/* WQE mode is only meaningful on gen P5/P7 chips */
		resp.modes = genp5 ? cctx->modes.wqe_mode : 0;
		if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
			resp.comp_mask = BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED;
	}

	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;
	if (genp5 && cctx->modes.db_push) {
		/* Advertise write-combining DPI (push mode)... */
		resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED;
		/* ...unless a P7 chip lacks ping-pong push support */
		if (_is_chip_p7(cctx) &&
		    !(dev_attr->dev_cap_flags &
		      CREQ_QUERY_FUNC_RESP_SB_PINGPONG_PUSH_MODE))
			resp.comp_mask &=
				~BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED;
	}

	resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_MQP_EX_SUPPORTED;

	if (rdev->dbr_pacing)
		resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_DBR_PACING_ENABLED;

	/* DB-recovery requires both device support and the user opt-in */
	if (rdev->dbr_drop_recov && rdev->user_dbr_drop_recov)
		resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_DBR_RECOVERY_ENABLED;

	if (udata->inlen >= sizeof(ureq)) {
		/* New-enough library: honor its requested capability flags */
		rc = ib_copy_from_udata(&ureq, udata,
					min(udata->inlen, sizeof(ureq)));
		if (rc)
			goto cfail;
		if (bnxt_re_init_pow2_flag(&ureq, &resp))
			dev_warn(rdev_to_dev(rdev),
				 "Enabled roundup logic. Library bug?\n");
		if (bnxt_re_init_rsvd_wqe_flag(&ureq, &resp, genp5))
			dev_warn(rdev_to_dev(rdev),
				 "Rsvd wqe in use! Try the updated library.\n");
	} else {
		/* Old library: fall back to conservative defaults */
		dev_warn(rdev_to_dev(rdev),
			 "Enabled roundup logic. Update the library!\n");
		resp.comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;

		dev_warn(rdev_to_dev(rdev),
			 "Rsvd wqe in use. Update the library!\n");
		resp.comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
	}

	/* Remember the negotiated capabilities for later mmap/verbs calls */
	uctx->cmask = (uint64_t)resp.comp_mask;
	rc = bnxt_re_copy_to_udata(rdev, &resp,
				   min(udata->outlen, sizeof(resp)),
				   udata);
	if (rc)
		goto cfail;

	INIT_LIST_HEAD(&uctx->cq_list);
	mutex_init(&uctx->cq_lock);

	return 0;
cfail:
	free_page((u64)uctx->shpg);
	uctx->shpg = NULL;
fail:
	return rc;
}
5361acd884deSSumit Saxena 
bnxt_re_dealloc_ucontext(struct ib_ucontext * ib_uctx)5362acd884deSSumit Saxena void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
5363acd884deSSumit Saxena {
5364acd884deSSumit Saxena 	struct bnxt_re_ucontext *uctx = to_bnxt_re(ib_uctx,
5365acd884deSSumit Saxena 						   struct bnxt_re_ucontext,
5366acd884deSSumit Saxena 						   ibucontext);
5367acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = uctx->rdev;
5368acd884deSSumit Saxena 	int rc = 0;
5369acd884deSSumit Saxena 
5370acd884deSSumit Saxena 	if (uctx->shpg)
5371acd884deSSumit Saxena 		free_page((u64)uctx->shpg);
5372acd884deSSumit Saxena 
5373acd884deSSumit Saxena 	if (uctx->dpi.dbr) {
5374acd884deSSumit Saxena 		/* Free DPI only if this is the first PD allocated by the
5375acd884deSSumit Saxena 		 * application and mark the context dpi as NULL
5376acd884deSSumit Saxena 		 */
5377acd884deSSumit Saxena 		if (_is_chip_gen_p5_p7(rdev->chip_ctx) && uctx->wcdpi.dbr) {
5378acd884deSSumit Saxena 			rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
5379acd884deSSumit Saxena 						    &uctx->wcdpi);
5380acd884deSSumit Saxena 			if (rc)
5381acd884deSSumit Saxena 				dev_err(rdev_to_dev(rdev),
5382acd884deSSumit Saxena 						"dealloc push dp failed\n");
5383acd884deSSumit Saxena 			uctx->wcdpi.dbr = NULL;
5384acd884deSSumit Saxena 		}
5385acd884deSSumit Saxena 
5386acd884deSSumit Saxena 		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
5387acd884deSSumit Saxena 					    &uctx->dpi);
5388acd884deSSumit Saxena 		if (rc)
5389acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!\n");
5390acd884deSSumit Saxena 			/* Don't fail, continue*/
5391acd884deSSumit Saxena 		uctx->dpi.dbr = NULL;
5392acd884deSSumit Saxena 	}
5393acd884deSSumit Saxena 	return;
5394acd884deSSumit Saxena }
5395acd884deSSumit Saxena 
/*
 * Find the CQ whose user-mapped CQ page matches the given mmap page
 * offset. Only P7 chips expose per-CQ pages; on other chips, or when no
 * CQ in this context matches, NULL is returned. The context's CQ list
 * is traversed under cq_lock.
 */
static struct bnxt_re_cq *is_bnxt_re_cq_page(struct bnxt_re_ucontext *uctx,
				      u64 pg_off)
{
	struct bnxt_re_cq *found = NULL;
	struct bnxt_re_cq *entry;

	if (!_is_chip_p7(uctx->rdev->chip_ctx))
		return NULL;

	mutex_lock(&uctx->cq_lock);
	list_for_each_entry(entry, &uctx->cq_list, cq_list) {
		if (pg_off == ((u64)entry->uctx_cq_page >> PAGE_SHIFT)) {
			found = entry;
			break;
		}
	}
	mutex_unlock(&uctx->cq_lock);

	return found;
}
5414acd884deSSumit Saxena 
5415acd884deSSumit Saxena /* Helper function to mmap the virtual memory from user app */
bnxt_re_mmap(struct ib_ucontext * ib_uctx,struct vm_area_struct * vma)5416acd884deSSumit Saxena int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
5417acd884deSSumit Saxena {
5418acd884deSSumit Saxena 	struct bnxt_re_ucontext *uctx = to_bnxt_re(ib_uctx,
5419acd884deSSumit Saxena 						   struct bnxt_re_ucontext,
5420acd884deSSumit Saxena 						   ibucontext);
5421acd884deSSumit Saxena 	struct bnxt_re_dev *rdev = uctx->rdev;
5422acd884deSSumit Saxena 	struct bnxt_re_cq *cq = NULL;
5423acd884deSSumit Saxena 	int rc = 0;
5424acd884deSSumit Saxena 	u64 pfn;
5425acd884deSSumit Saxena 
5426acd884deSSumit Saxena 	switch (vma->vm_pgoff) {
5427acd884deSSumit Saxena 	case BNXT_RE_MAP_SH_PAGE:
5428acd884deSSumit Saxena 		pfn = vtophys(uctx->shpg) >> PAGE_SHIFT;
5429acd884deSSumit Saxena 		return rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
5430acd884deSSumit Saxena 		dev_dbg(rdev_to_dev(rdev), "%s:%d uctx->shpg 0x%lx, vtophys(uctx->shpg) 0x%lx, pfn = 0x%lx \n",
5431acd884deSSumit Saxena 				__func__, __LINE__, (u64) uctx->shpg, vtophys(uctx->shpg), pfn);
5432acd884deSSumit Saxena 		if (rc) {
5433acd884deSSumit Saxena 			dev_err(rdev_to_dev(rdev), "Shared page mapping failed!\n");
5434acd884deSSumit Saxena 			rc = -EAGAIN;
5435acd884deSSumit Saxena 		}
5436acd884deSSumit Saxena 		return rc;
5437acd884deSSumit Saxena 	case BNXT_RE_MAP_WC:
5438acd884deSSumit Saxena 		vma->vm_page_prot =
5439acd884deSSumit Saxena 			pgprot_writecombine(vma->vm_page_prot);
5440acd884deSSumit Saxena 		pfn = (uctx->wcdpi.umdbr >> PAGE_SHIFT);
5441acd884deSSumit Saxena 		if (!pfn)
5442acd884deSSumit Saxena 			return -EFAULT;
5443acd884deSSumit Saxena 		break;
5444acd884deSSumit Saxena 	case BNXT_RE_DBR_PAGE:
5445acd884deSSumit Saxena 		/* Driver doesn't expect write access request */
5446acd884deSSumit Saxena 		if (vma->vm_flags & VM_WRITE)
5447acd884deSSumit Saxena 			return -EFAULT;
5448acd884deSSumit Saxena 
5449acd884deSSumit Saxena 		pfn = vtophys(rdev->dbr_page) >> PAGE_SHIFT;
5450acd884deSSumit Saxena 		if (!pfn)
5451acd884deSSumit Saxena 			return -EFAULT;
5452acd884deSSumit Saxena 		break;
5453acd884deSSumit Saxena 	case BNXT_RE_MAP_DB_RECOVERY_PAGE:
5454acd884deSSumit Saxena 		pfn = vtophys(uctx->dbr_recov_cq_page) >> PAGE_SHIFT;
5455acd884deSSumit Saxena 		if (!pfn)
5456acd884deSSumit Saxena 			return -EFAULT;
5457acd884deSSumit Saxena 		break;
5458acd884deSSumit Saxena 	default:
5459acd884deSSumit Saxena 		cq = is_bnxt_re_cq_page(uctx, vma->vm_pgoff);
5460acd884deSSumit Saxena 		if (cq) {
5461acd884deSSumit Saxena 			pfn = vtophys((void *)cq->uctx_cq_page) >> PAGE_SHIFT;
5462acd884deSSumit Saxena 			rc = rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
5463acd884deSSumit Saxena 			if (rc) {
5464acd884deSSumit Saxena 				dev_err(rdev_to_dev(rdev),
5465acd884deSSumit Saxena 					"CQ page mapping failed!\n");
5466acd884deSSumit Saxena 				rc = -EAGAIN;
5467acd884deSSumit Saxena 			}
5468acd884deSSumit Saxena 			goto out;
5469acd884deSSumit Saxena 		} else {
5470acd884deSSumit Saxena 			vma->vm_page_prot =
5471acd884deSSumit Saxena 				pgprot_noncached(vma->vm_page_prot);
5472acd884deSSumit Saxena 			pfn = vma->vm_pgoff;
5473acd884deSSumit Saxena 		}
5474acd884deSSumit Saxena 		break;
5475acd884deSSumit Saxena 	}
5476acd884deSSumit Saxena 
5477acd884deSSumit Saxena 	rc = rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
5478acd884deSSumit Saxena 	if (rc) {
5479acd884deSSumit Saxena 		dev_err(rdev_to_dev(rdev), "DPI mapping failed!\n");
5480acd884deSSumit Saxena 		return -EAGAIN;
5481acd884deSSumit Saxena 	}
5482acd884deSSumit Saxena 	rc = __bnxt_re_set_vma_data(uctx, vma);
5483acd884deSSumit Saxena out:
5484acd884deSSumit Saxena 	return rc;
5485acd884deSSumit Saxena }
5486acd884deSSumit Saxena 
/*
 * MAD (management datagram) processing callback. This driver performs
 * no MAD handling of its own; the stub unconditionally reports success
 * so the core proceeds with default processing. All parameters are
 * ignored and @out_mad is not written.
 */
int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *wc, const struct ib_grh *grh,
			const struct ib_mad_hdr *in_mad, size_t in_mad_size,
			struct ib_mad_hdr *out_mad, size_t *out_mad_size,
			u16 *out_mad_pkey_index)
{
	return IB_MAD_RESULT_SUCCESS;
}
5495acd884deSSumit Saxena 
/*
 * Disassociate a user context from the device (e.g. on hot-unplug).
 * Intentionally a no-op for this driver.
 */
void bnxt_re_disassociate_ucntx(struct ib_ucontext *ib_uctx)
{
}
5499