xref: /linux/drivers/infiniband/hw/erdma/erdma_verbs.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
4 /*          Kai Shen <kaishen@linux.alibaba.com> */
5 /* Copyright (c) 2020-2022, Alibaba Group. */
6 
7 /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
8 /* Copyright (c) 2008-2019, IBM Corporation */
9 
10 /* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */
11 
12 #include <linux/vmalloc.h>
13 #include <net/addrconf.h>
14 #include <rdma/erdma-abi.h>
15 #include <rdma/ib_umem.h>
16 #include <rdma/uverbs_ioctl.h>
17 
18 #include "erdma.h"
19 #include "erdma_cm.h"
20 #include "erdma_verbs.h"
21 
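/*
 * Pack a queue buffer MTT into the CREATE_QP command: small tables are
 * copied inline into the request (level 0), larger ones are referenced
 * indirectly through the DMA address of a one-level MTT buffer.
 */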
22 static void assemble_qbuf_mtt_for_cmd(struct erdma_mem *mem, u32 *cfg,
23 				      u64 *addr0, u64 *addr1)
24 {
25 	struct erdma_mtt *mtt = mem->mtt;
26 
27 	if (mem->mtt_nents > ERDMA_MAX_INLINE_MTT_ENTRIES) {
28 		*addr0 = mtt->buf_dma;
29 		*cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
30 				   ERDMA_MR_MTT_1LEVEL);
31 	} else {
32 		*addr0 = mtt->buf[0];
33 		memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1));
34 		*cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
35 				   ERDMA_MR_MTT_0LEVEL);
36 	}
37 }
38 
39 static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
40 {
41 	struct erdma_dev *dev = to_edev(qp->ibqp.device);
42 	struct erdma_pd *pd = to_epd(qp->ibqp.pd);
43 	struct erdma_cmdq_create_qp_req req;
44 	struct erdma_uqp *user_qp;
45 	u64 resp0, resp1;
46 	int err;
47 
48 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
49 				CMDQ_OPCODE_CREATE_QP);
50 
51 	req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK,
52 			      ilog2(qp->attrs.sq_size)) |
53 		   FIELD_PREP(ERDMA_CMD_CREATE_QP_QPN_MASK, QP_ID(qp));
54 	req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK,
55 			      ilog2(qp->attrs.rq_size)) |
56 		   FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
57 
58 	if (qp->ibqp.qp_type == IB_QPT_RC)
59 		req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
60 				      ERDMA_QPT_RC);
61 	else
62 		req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
63 				      ERDMA_QPT_UD);
64 
65 	if (rdma_is_kernel_res(&qp->ibqp.res)) {
66 		u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
67 
68 		req.sq_cqn_mtt_cfg =
69 			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
70 				   pgsz_range) |
71 			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
72 		req.rq_cqn_mtt_cfg =
73 			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
74 				   pgsz_range) |
75 			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
76 
77 		req.sq_mtt_cfg =
78 			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) |
79 			FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) |
80 			FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
81 				   ERDMA_MR_MTT_0LEVEL);
82 		req.rq_mtt_cfg = req.sq_mtt_cfg;
83 
84 		req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
85 		req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
86 		req.sq_dbrec_dma = qp->kern_qp.sq_dbrec_dma;
87 		req.rq_dbrec_dma = qp->kern_qp.rq_dbrec_dma;
88 	} else {
89 		user_qp = &qp->user_qp;
90 		req.sq_cqn_mtt_cfg = FIELD_PREP(
91 			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
92 			ilog2(user_qp->sq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
93 		req.sq_cqn_mtt_cfg |=
94 			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
95 
96 		req.rq_cqn_mtt_cfg = FIELD_PREP(
97 			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
98 			ilog2(user_qp->rq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
99 		req.rq_cqn_mtt_cfg |=
100 			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
101 
102 		req.sq_mtt_cfg = user_qp->sq_mem.page_offset;
103 		req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
104 					     user_qp->sq_mem.mtt_nents);
105 
106 		req.rq_mtt_cfg = user_qp->rq_mem.page_offset;
107 		req.rq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
108 					     user_qp->rq_mem.mtt_nents);
109 
110 		assemble_qbuf_mtt_for_cmd(&user_qp->sq_mem, &req.sq_mtt_cfg,
111 					  &req.sq_buf_addr, req.sq_mtt_entry);
112 		assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg,
113 					  &req.rq_buf_addr, req.rq_mtt_entry);
114 
115 		req.sq_dbrec_dma = user_qp->sq_dbrec_dma;
116 		req.rq_dbrec_dma = user_qp->rq_dbrec_dma;
117 
118 		if (uctx->ext_db.enable) {
119 			req.sq_cqn_mtt_cfg |=
120 				FIELD_PREP(ERDMA_CMD_CREATE_QP_DB_CFG_MASK, 1);
121 			req.db_cfg =
122 				FIELD_PREP(ERDMA_CMD_CREATE_QP_SQDB_CFG_MASK,
123 					   uctx->ext_db.sdb_off) |
124 				FIELD_PREP(ERDMA_CMD_CREATE_QP_RQDB_CFG_MASK,
125 					   uctx->ext_db.rdb_off);
126 		}
127 	}
128 
129 	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, &resp1,
130 				  true);
131 	if (!err && erdma_device_iwarp(dev))
132 		qp->attrs.iwarp.cookie =
133 			FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);
134 
135 	return err;
136 }
137 
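/*
 * Build and post a REG_MR command. DMA MRs need no page translation at all;
 * small MTTs are embedded directly in the request, while fast-reg MRs and
 * large regions reference an external (possibly multi-level) MTT by its DMA
 * address.
 */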
138 static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
139 {
140 	struct erdma_pd *pd = to_epd(mr->ibmr.pd);
141 	u32 mtt_level = ERDMA_MR_MTT_0LEVEL;
142 	struct erdma_cmdq_reg_mr_req req;
143 
144 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);
145 
146 	if (mr->type == ERDMA_MR_TYPE_FRMR ||
147 	    mr->mem.page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES) {
148 		if (mr->mem.mtt->continuous) {
149 			req.phy_addr[0] = mr->mem.mtt->buf_dma;
150 			mtt_level = ERDMA_MR_MTT_1LEVEL;
151 		} else {
152 			req.phy_addr[0] = mr->mem.mtt->dma_addrs[0];
153 			mtt_level = mr->mem.mtt->level;
154 		}
155 	} else if (mr->type != ERDMA_MR_TYPE_DMA) {
156 		memcpy(req.phy_addr, mr->mem.mtt->buf,
157 		       MTT_SIZE(mr->mem.page_cnt));
158 	}
159 
160 	req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
161 		   FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) |
162 		   FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8);
163 	req.cfg1 = FIELD_PREP(ERDMA_CMD_REGMR_PD_MASK, pd->pdn) |
164 		   FIELD_PREP(ERDMA_CMD_REGMR_TYPE_MASK, mr->type) |
165 		   FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access);
166 	req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK,
167 			      ilog2(mr->mem.page_size)) |
168 		   FIELD_PREP(ERDMA_CMD_REGMR_MTT_LEVEL_MASK, mtt_level) |
169 		   FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt);
170 
171 	if (mr->type == ERDMA_MR_TYPE_DMA)
172 		goto post_cmd;
173 
174 	if (mr->type == ERDMA_MR_TYPE_NORMAL) {
175 		req.start_va = mr->mem.va;
176 		req.size = mr->mem.len;
177 	}
178 
179 	if (!mr->mem.mtt->continuous && mr->mem.mtt->level > 1) {
180 		req.cfg0 |= FIELD_PREP(ERDMA_CMD_MR_VERSION_MASK, 1);
181 		req.cfg2 |= FIELD_PREP(ERDMA_CMD_REGMR_MTT_PAGESIZE_MASK,
182 				       PAGE_SHIFT - ERDMA_HW_PAGE_SHIFT);
183 		req.size_h = upper_32_bits(mr->mem.len);
184 		req.mtt_cnt_h = mr->mem.page_cnt >> 20;
185 	}
186 
187 post_cmd:
188 	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
189 				   true);
190 }
191 
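/*
 * Build and post a CREATE_CQ command. Kernel CQs use a single DMA-coherent
 * queue buffer; user CQs pass either the one inline page address or the DMA
 * address of their MTT, together with the doorbell record location.
 */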
192 static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
193 {
194 	struct erdma_dev *dev = to_edev(cq->ibcq.device);
195 	struct erdma_cmdq_create_cq_req req;
196 	struct erdma_mem *mem;
197 	u32 page_size;
198 
199 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
200 				CMDQ_OPCODE_CREATE_CQ);
201 
202 	req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_CQN_MASK, cq->cqn) |
203 		   FIELD_PREP(ERDMA_CMD_CREATE_CQ_DEPTH_MASK, ilog2(cq->depth));
204 	req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_EQN_MASK, cq->assoc_eqn);
205 
206 	if (rdma_is_kernel_res(&cq->ibcq.res)) {
207 		page_size = SZ_32M;
208 		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
209 				       ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
210 		req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
211 		req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
212 
213 		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) |
214 			    FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
215 				       ERDMA_MR_MTT_0LEVEL);
216 
217 		req.first_page_offset = 0;
218 		req.cq_dbrec_dma = cq->kern_cq.dbrec_dma;
219 	} else {
220 		mem = &cq->user_cq.qbuf_mem;
221 		req.cfg0 |=
222 			FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
223 				   ilog2(mem->page_size) - ERDMA_HW_PAGE_SHIFT);
224 		if (mem->mtt_nents == 1) {
225 			req.qbuf_addr_l = lower_32_bits(mem->mtt->buf[0]);
226 			req.qbuf_addr_h = upper_32_bits(mem->mtt->buf[0]);
227 			req.cfg1 |=
228 				FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
229 					   ERDMA_MR_MTT_0LEVEL);
230 		} else {
231 			req.qbuf_addr_l = lower_32_bits(mem->mtt->buf_dma);
232 			req.qbuf_addr_h = upper_32_bits(mem->mtt->buf_dma);
233 			req.cfg1 |=
234 				FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
235 					   ERDMA_MR_MTT_1LEVEL);
236 		}
237 		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
238 				       mem->mtt_nents);
239 
240 		req.first_page_offset = mem->page_offset;
241 		req.cq_dbrec_dma = cq->user_cq.dbrec_dma;
242 
243 		if (uctx->ext_db.enable) {
244 			req.cfg1 |= FIELD_PREP(
245 				ERDMA_CMD_CREATE_CQ_MTT_DB_CFG_MASK, 1);
246 			req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_DB_CFG_MASK,
247 					      uctx->ext_db.cdb_off);
248 		}
249 	}
250 
251 	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
252 				   true);
253 }
254 
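/*
 * Bitmap-based index allocator: scan forward from the last allocated
 * position and wrap around once, so indexes are handed out round-robin
 * instead of immediately reusing freed entries.
 */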
255 static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
256 {
257 	int idx;
258 	unsigned long flags;
259 
260 	spin_lock_irqsave(&res_cb->lock, flags);
261 	idx = find_next_zero_bit(res_cb->bitmap, res_cb->max_cap,
262 				 res_cb->next_alloc_idx);
263 	if (idx == res_cb->max_cap) {
264 		idx = find_first_zero_bit(res_cb->bitmap, res_cb->max_cap);
265 		if (idx == res_cb->max_cap) {
266 			res_cb->next_alloc_idx = 1;
267 			spin_unlock_irqrestore(&res_cb->lock, flags);
268 			return -ENOSPC;
269 		}
270 	}
271 
272 	set_bit(idx, res_cb->bitmap);
273 	res_cb->next_alloc_idx = idx + 1;
274 	spin_unlock_irqrestore(&res_cb->lock, flags);
275 
276 	return idx;
277 }
278 
279 static inline void erdma_free_idx(struct erdma_resource_cb *res_cb, u32 idx)
280 {
281 	unsigned long flags;
282 	u32 used;
283 
284 	spin_lock_irqsave(&res_cb->lock, flags);
285 	used = __test_and_clear_bit(idx, res_cb->bitmap);
286 	spin_unlock_irqrestore(&res_cb->lock, flags);
287 	WARN_ON(!used);
288 }
289 
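/*
 * Register a driver-private mmap entry for a doorbell region and return the
 * offset that userspace must pass to mmap().
 */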
290 static struct rdma_user_mmap_entry *
291 erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address,
292 			     u32 size, u8 mmap_flag, u64 *mmap_offset)
293 {
294 	struct erdma_user_mmap_entry *entry =
295 		kzalloc(sizeof(*entry), GFP_KERNEL);
296 	int ret;
297 
298 	if (!entry)
299 		return NULL;
300 
301 	entry->address = (u64)address;
302 	entry->mmap_flag = mmap_flag;
303 
304 	size = PAGE_ALIGN(size);
305 
306 	ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry,
307 					  size);
308 	if (ret) {
309 		kfree(entry);
310 		return NULL;
311 	}
312 
313 	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
314 
315 	return &entry->rdma_entry;
316 }
317 
318 int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
319 		       struct ib_udata *unused)
320 {
321 	struct erdma_dev *dev = to_edev(ibdev);
322 
323 	memset(attr, 0, sizeof(*attr));
324 
325 	attr->max_mr_size = dev->attrs.max_mr_size;
326 	attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
327 	attr->vendor_part_id = dev->pdev->device;
328 	attr->hw_ver = dev->pdev->revision;
329 	attr->max_qp = dev->attrs.max_qp - 1;
330 	attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
331 	attr->max_qp_rd_atom = dev->attrs.max_ord;
332 	attr->max_qp_init_rd_atom = dev->attrs.max_ird;
333 	attr->max_res_rd_atom = dev->attrs.max_qp * dev->attrs.max_ird;
334 	attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
335 	attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
336 	ibdev->local_dma_lkey = dev->attrs.local_dma_key;
337 	attr->max_send_sge = dev->attrs.max_send_sge;
338 	attr->max_recv_sge = dev->attrs.max_recv_sge;
339 	attr->max_sge_rd = dev->attrs.max_sge_rd;
340 	attr->max_cq = dev->attrs.max_cq - 1;
341 	attr->max_cqe = dev->attrs.max_cqe;
342 	attr->max_mr = dev->attrs.max_mr;
343 	attr->max_pd = dev->attrs.max_pd;
344 	attr->max_mw = dev->attrs.max_mw;
345 	attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
346 	attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;
347 
348 	if (erdma_device_rocev2(dev)) {
349 		attr->max_pkeys = ERDMA_MAX_PKEYS;
350 		attr->max_ah = dev->attrs.max_ah;
351 	}
352 
353 	if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC)
354 		attr->atomic_cap = IB_ATOMIC_GLOB;
355 
356 	attr->fw_ver = dev->attrs.fw_version;
357 
358 	if (dev->netdev)
359 		addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
360 				    dev->netdev->dev_addr);
361 
362 	return 0;
363 }
364 
365 int erdma_query_gid(struct ib_device *ibdev, u32 port, int idx,
366 		    union ib_gid *gid)
367 {
368 	struct erdma_dev *dev = to_edev(ibdev);
369 
370 	memset(gid, 0, sizeof(*gid));
371 	ether_addr_copy(gid->raw, dev->attrs.peer_addr);
372 
373 	return 0;
374 }
375 
376 int erdma_query_port(struct ib_device *ibdev, u32 port,
377 		     struct ib_port_attr *attr)
378 {
379 	struct erdma_dev *dev = to_edev(ibdev);
380 	struct net_device *ndev = dev->netdev;
381 
382 	memset(attr, 0, sizeof(*attr));
383 
384 	if (erdma_device_iwarp(dev)) {
385 		attr->gid_tbl_len = 1;
386 	} else {
387 		attr->gid_tbl_len = dev->attrs.max_gid;
388 		attr->ip_gids = true;
389 		attr->pkey_tbl_len = ERDMA_MAX_PKEYS;
390 	}
391 
392 	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
393 	attr->max_msg_sz = -1;
394 
395 	if (!ndev)
396 		goto out;
397 
398 	ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
399 	attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
400 	attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
401 	attr->state = ib_get_curr_port_state(ndev);
402 
403 out:
404 	if (attr->state == IB_PORT_ACTIVE)
405 		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
406 	else
407 		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
408 
409 	return 0;
410 }
411 
412 int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
413 			     struct ib_port_immutable *port_immutable)
414 {
415 	struct erdma_dev *dev = to_edev(ibdev);
416 
417 	if (erdma_device_iwarp(dev)) {
418 		port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
419 		port_immutable->gid_tbl_len = 1;
420 	} else {
421 		port_immutable->core_cap_flags =
422 			RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
423 		port_immutable->max_mad_size = IB_MGMT_MAD_SIZE;
424 		port_immutable->gid_tbl_len = dev->attrs.max_gid;
425 		port_immutable->pkey_tbl_len = ERDMA_MAX_PKEYS;
426 	}
427 
428 	return 0;
429 }
430 
431 int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
432 {
433 	struct erdma_pd *pd = to_epd(ibpd);
434 	struct erdma_dev *dev = to_edev(ibpd->device);
435 	int pdn;
436 
437 	pdn = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_PD]);
438 	if (pdn < 0)
439 		return pdn;
440 
441 	pd->pdn = pdn;
442 
443 	return 0;
444 }
445 
446 int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
447 {
448 	struct erdma_pd *pd = to_epd(ibpd);
449 	struct erdma_dev *dev = to_edev(ibpd->device);
450 
451 	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_PD], pd->pdn);
452 
453 	return 0;
454 }
455 
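/*
 * Delayed work that posts a REFLUSH command carrying the current SQ and RQ
 * producer indexes of a kernel QP.
 */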
456 static void erdma_flush_worker(struct work_struct *work)
457 {
458 	struct delayed_work *dwork = to_delayed_work(work);
459 	struct erdma_qp *qp =
460 		container_of(dwork, struct erdma_qp, reflush_dwork);
461 	struct erdma_cmdq_reflush_req req;
462 
463 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
464 				CMDQ_OPCODE_REFLUSH);
465 	req.qpn = QP_ID(qp);
466 	req.sq_pi = qp->kern_qp.sq_pi;
467 	req.rq_pi = qp->kern_qp.rq_pi;
468 	erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL,
469 			    true);
470 }
471 
472 static int erdma_qp_validate_cap(struct erdma_dev *dev,
473 				 struct ib_qp_init_attr *attrs)
474 {
475 	if ((attrs->cap.max_send_wr > dev->attrs.max_send_wr) ||
476 	    (attrs->cap.max_recv_wr > dev->attrs.max_recv_wr) ||
477 	    (attrs->cap.max_send_sge > dev->attrs.max_send_sge) ||
478 	    (attrs->cap.max_recv_sge > dev->attrs.max_recv_sge) ||
479 	    (attrs->cap.max_inline_data > ERDMA_MAX_INLINE) ||
480 	    !attrs->cap.max_send_wr || !attrs->cap.max_recv_wr) {
481 		return -EINVAL;
482 	}
483 
484 	return 0;
485 }
486 
487 static int erdma_qp_validate_attr(struct erdma_dev *dev,
488 				  struct ib_qp_init_attr *attrs)
489 {
490 	if (erdma_device_iwarp(dev) && attrs->qp_type != IB_QPT_RC)
491 		return -EOPNOTSUPP;
492 
493 	if (erdma_device_rocev2(dev) && attrs->qp_type != IB_QPT_RC &&
494 	    attrs->qp_type != IB_QPT_UD && attrs->qp_type != IB_QPT_GSI)
495 		return -EOPNOTSUPP;
496 
497 	if (attrs->srq)
498 		return -EOPNOTSUPP;
499 
500 	if (!attrs->send_cq || !attrs->recv_cq)
501 		return -EOPNOTSUPP;
502 
503 	return 0;
504 }
505 
506 static void free_kernel_qp(struct erdma_qp *qp)
507 {
508 	struct erdma_dev *dev = qp->dev;
509 
510 	vfree(qp->kern_qp.swr_tbl);
511 	vfree(qp->kern_qp.rwr_tbl);
512 
513 	if (qp->kern_qp.sq_buf)
514 		dma_free_coherent(&dev->pdev->dev,
515 				  qp->attrs.sq_size << SQEBB_SHIFT,
516 				  qp->kern_qp.sq_buf,
517 				  qp->kern_qp.sq_buf_dma_addr);
518 
519 	if (qp->kern_qp.sq_dbrec)
520 		dma_pool_free(dev->db_pool, qp->kern_qp.sq_dbrec,
521 			      qp->kern_qp.sq_dbrec_dma);
522 
523 	if (qp->kern_qp.rq_buf)
524 		dma_free_coherent(&dev->pdev->dev,
525 				  qp->attrs.rq_size << RQE_SHIFT,
526 				  qp->kern_qp.rq_buf,
527 				  qp->kern_qp.rq_buf_dma_addr);
528 
529 	if (qp->kern_qp.rq_dbrec)
530 		dma_pool_free(dev->db_pool, qp->kern_qp.rq_dbrec,
531 			      qp->kern_qp.rq_dbrec_dma);
532 }
533 
534 static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
535 			  struct ib_qp_init_attr *attrs)
536 {
537 	struct erdma_kqp *kqp = &qp->kern_qp;
538 	int size;
539 
540 	if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
541 		kqp->sig_all = 1;
542 
543 	kqp->sq_pi = 0;
544 	kqp->sq_ci = 0;
545 	kqp->rq_pi = 0;
546 	kqp->rq_ci = 0;
547 	kqp->hw_sq_db =
548 		dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT);
549 	kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET;
550 
551 	kqp->swr_tbl = vmalloc_array(qp->attrs.sq_size, sizeof(u64));
552 	kqp->rwr_tbl = vmalloc_array(qp->attrs.rq_size, sizeof(u64));
553 	if (!kqp->swr_tbl || !kqp->rwr_tbl)
554 		goto err_out;
555 
556 	size = qp->attrs.sq_size << SQEBB_SHIFT;
557 	kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
558 					 &kqp->sq_buf_dma_addr, GFP_KERNEL);
559 	if (!kqp->sq_buf)
560 		goto err_out;
561 
562 	kqp->sq_dbrec =
563 		dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->sq_dbrec_dma);
564 	if (!kqp->sq_dbrec)
565 		goto err_out;
566 
567 	size = qp->attrs.rq_size << RQE_SHIFT;
568 	kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
569 					 &kqp->rq_buf_dma_addr, GFP_KERNEL);
570 	if (!kqp->rq_buf)
571 		goto err_out;
572 
573 	kqp->rq_dbrec =
574 		dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->rq_dbrec_dma);
575 	if (!kqp->rq_dbrec)
576 		goto err_out;
577 
578 	return 0;
579 
580 err_out:
581 	free_kernel_qp(qp);
582 	return -ENOMEM;
583 }
584 
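/*
 * Walk down to the lowest-level MTT and fill it with the DMA address of
 * every page_size-sized block of the pinned umem.
 */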
585 static void erdma_fill_bottom_mtt(struct erdma_dev *dev, struct erdma_mem *mem)
586 {
587 	struct erdma_mtt *mtt = mem->mtt;
588 	struct ib_block_iter biter;
589 	u32 idx = 0;
590 
591 	while (mtt->low_level)
592 		mtt = mtt->low_level;
593 
594 	rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size)
595 		mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
596 }
597 
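/*
 * Allocate a kmalloc-backed MTT buffer and map it to the device with a
 * single streaming DMA mapping (the "continuous" MTT layout).
 */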
598 static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev,
599 					       size_t size)
600 {
601 	struct erdma_mtt *mtt;
602 
603 	mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
604 	if (!mtt)
605 		return ERR_PTR(-ENOMEM);
606 
607 	mtt->size = size;
608 	mtt->buf = kzalloc(mtt->size, GFP_KERNEL);
609 	if (!mtt->buf)
610 		goto err_free_mtt;
611 
612 	mtt->continuous = true;
613 	mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size,
614 				      DMA_TO_DEVICE);
615 	if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma))
616 		goto err_free_mtt_buf;
617 
618 	return mtt;
619 
620 err_free_mtt_buf:
621 	kfree(mtt->buf);
622 
623 err_free_mtt:
624 	kfree(mtt);
625 
626 	return ERR_PTR(-ENOMEM);
627 }
628 
629 static void erdma_unmap_page_list(struct erdma_dev *dev, dma_addr_t *pg_dma,
630 				  u32 npages)
631 {
632 	u32 i;
633 
634 	for (i = 0; i < npages; i++)
635 		dma_unmap_page(&dev->pdev->dev, pg_dma[i], PAGE_SIZE,
636 			       DMA_TO_DEVICE);
637 }
638 
639 static void erdma_destroy_mtt_buf_dma_addrs(struct erdma_dev *dev,
640 					    struct erdma_mtt *mtt)
641 {
642 	erdma_unmap_page_list(dev, mtt->dma_addrs, mtt->npages);
643 	vfree(mtt->dma_addrs);
644 }
645 
646 static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
647 				      struct erdma_mtt *mtt)
648 {
649 	erdma_destroy_mtt_buf_dma_addrs(dev, mtt);
650 	vfree(mtt->buf);
651 	kfree(mtt);
652 }
653 
654 static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
655 				  struct erdma_mtt *low_mtt)
656 {
657 	dma_addr_t *pg_addr = mtt->buf;
658 	u32 i;
659 
660 	for (i = 0; i < low_mtt->npages; i++)
661 		pg_addr[i] = low_mtt->dma_addrs[i];
662 }
663 
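/*
 * DMA-map every page backing a vmalloc'ed buffer. Returns the number of
 * mapped pages (0 on failure) and hands back the address array through
 * @dma_addrs.
 */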
664 static u32 vmalloc_to_dma_addrs(struct erdma_dev *dev, dma_addr_t **dma_addrs,
665 				void *buf, u64 len)
666 {
667 	dma_addr_t *pg_dma;
668 	struct page *pg;
669 	u32 npages, i;
670 	void *addr;
671 
672 	npages = (PAGE_ALIGN((u64)buf + len) - PAGE_ALIGN_DOWN((u64)buf)) >>
673 		 PAGE_SHIFT;
674 	pg_dma = vcalloc(npages, sizeof(*pg_dma));
675 	if (!pg_dma)
676 		return 0;
677 
678 	addr = buf;
679 	for (i = 0; i < npages; i++) {
680 		pg = vmalloc_to_page(addr);
681 		if (!pg)
682 			goto err;
683 
684 		pg_dma[i] = dma_map_page(&dev->pdev->dev, pg, 0, PAGE_SIZE,
685 					 DMA_TO_DEVICE);
686 		if (dma_mapping_error(&dev->pdev->dev, pg_dma[i]))
687 			goto err;
688 
689 		addr += PAGE_SIZE;
690 	}
691 
692 	*dma_addrs = pg_dma;
693 
694 	return npages;
695 err:
696 	erdma_unmap_page_list(dev, pg_dma, i);
697 	vfree(pg_dma);
698 
699 	return 0;
700 }
701 
702 static int erdma_create_mtt_buf_dma_addrs(struct erdma_dev *dev,
703 					  struct erdma_mtt *mtt)
704 {
705 	dma_addr_t *addrs;
706 	u32 npages;
707 
708 	/* Fail if buf is not page aligned. */
709 	if ((uintptr_t)mtt->buf & ~PAGE_MASK)
710 		return -EINVAL;
711 
712 	npages = vmalloc_to_dma_addrs(dev, &addrs, mtt->buf, mtt->size);
713 	if (!npages)
714 		return -ENOMEM;
715 
716 	mtt->dma_addrs = addrs;
717 	mtt->npages = npages;
718 
719 	return 0;
720 }
721 
722 static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
723 						  size_t size)
724 {
725 	struct erdma_mtt *mtt;
726 	int ret = -ENOMEM;
727 
728 	mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
729 	if (!mtt)
730 		return ERR_PTR(-ENOMEM);
731 
732 	mtt->size = ALIGN(size, PAGE_SIZE);
733 	mtt->buf = vzalloc(mtt->size);
734 	mtt->continuous = false;
735 	if (!mtt->buf)
736 		goto err_free_mtt;
737 
738 	ret = erdma_create_mtt_buf_dma_addrs(dev, mtt);
739 	if (ret)
740 		goto err_free_mtt_buf;
741 
742 	ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, npages:%u\n",
743 		  mtt->size, mtt->npages);
744 
745 	return mtt;
746 
747 err_free_mtt_buf:
748 	vfree(mtt->buf);
749 
750 err_free_mtt:
751 	kfree(mtt);
752 
753 	return ERR_PTR(ret);
754 }
755 
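/*
 * Build the MTT for a memory region. When the device supports MTT_VA and a
 * continuous layout is not forced, scatter MTTs are stacked level by level
 * until the top level fits into a single page:
 *
 *   top level -> ... -> bottom level -> data pages
 *
 * Each upper level stores the DMA addresses of the pages backing the level
 * below it. At most three levels are supported.
 */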
756 static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
757 					  bool force_continuous)
758 {
759 	struct erdma_mtt *mtt, *tmp_mtt;
760 	int ret, level = 0;
761 
762 	ibdev_dbg(&dev->ibdev, "create_mtt, size:%lu, force cont:%d\n", size,
763 		  force_continuous);
764 
765 	if (!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_MTT_VA))
766 		force_continuous = true;
767 
768 	if (force_continuous)
769 		return erdma_create_cont_mtt(dev, size);
770 
771 	mtt = erdma_create_scatter_mtt(dev, size);
772 	if (IS_ERR(mtt))
773 		return mtt;
774 	level = 1;
775 
776 	/* Converge the MTT table until the top level fits in a single page. */
777 	while (mtt->npages != 1 && level <= 3) {
778 		tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->npages));
779 		if (IS_ERR(tmp_mtt)) {
780 			ret = PTR_ERR(tmp_mtt);
781 			goto err_free_mtt;
782 		}
783 		erdma_init_middle_mtt(tmp_mtt, mtt);
784 		tmp_mtt->low_level = mtt;
785 		mtt = tmp_mtt;
786 		level++;
787 	}
788 
789 	if (level > 3) {
790 		ret = -ENOMEM;
791 		goto err_free_mtt;
792 	}
793 
794 	mtt->level = level;
795 	ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
796 		  mtt->level, mtt->dma_addrs[0]);
797 
798 	return mtt;
799 err_free_mtt:
800 	while (mtt) {
801 		tmp_mtt = mtt->low_level;
802 		erdma_destroy_scatter_mtt(dev, mtt);
803 		mtt = tmp_mtt;
804 	}
805 
806 	return ERR_PTR(ret);
807 }
808 
809 static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
810 {
811 	struct erdma_mtt *tmp_mtt;
812 
813 	if (mtt->continuous) {
814 		dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size,
815 				 DMA_TO_DEVICE);
816 		kfree(mtt->buf);
817 		kfree(mtt);
818 	} else {
819 		while (mtt) {
820 			tmp_mtt = mtt->low_level;
821 			erdma_destroy_scatter_mtt(dev, mtt);
822 			mtt = tmp_mtt;
823 		}
824 	}
825 }
826 
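/*
 * Pin the user buffer, choose the best page size the device supports for it,
 * and build the MTT describing the pinned pages.
 */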
827 static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
828 			   u64 start, u64 len, int access, u64 virt,
829 			   unsigned long req_page_size, bool force_continuous)
830 {
831 	int ret = 0;
832 
833 	mem->umem = ib_umem_get(&dev->ibdev, start, len, access);
834 	if (IS_ERR(mem->umem)) {
835 		ret = PTR_ERR(mem->umem);
836 		mem->umem = NULL;
837 		return ret;
838 	}
839 
840 	mem->va = virt;
841 	mem->len = len;
842 	mem->page_size = ib_umem_find_best_pgsz(mem->umem, req_page_size, virt);
843 	mem->page_offset = start & (mem->page_size - 1);
844 	mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);
845 	mem->page_cnt = mem->mtt_nents;
846 	mem->mtt = erdma_create_mtt(dev, MTT_SIZE(mem->page_cnt),
847 				    force_continuous);
848 	if (IS_ERR(mem->mtt)) {
849 		ret = PTR_ERR(mem->mtt);
850 		goto error_ret;
851 	}
852 
853 	erdma_fill_bottom_mtt(dev, mem);
854 
855 	return 0;
856 
857 error_ret:
858 	if (mem->umem) {
859 		ib_umem_release(mem->umem);
860 		mem->umem = NULL;
861 	}
862 
863 	return ret;
864 }
865 
866 static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
867 {
868 	if (mem->mtt)
869 		erdma_destroy_mtt(dev, mem->mtt);
870 
871 	if (mem->umem) {
872 		ib_umem_release(mem->umem);
873 		mem->umem = NULL;
874 	}
875 }
876 
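/*
 * Doorbell record pages are shared within a context: reuse an already pinned
 * page covering @dbrecords_va if one exists, pin it on first use, and return
 * the DMA address of the record inside that page.
 */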
877 static int erdma_map_user_dbrecords(struct erdma_ucontext *ctx,
878 				    u64 dbrecords_va,
879 				    struct erdma_user_dbrecords_page **dbr_page,
880 				    dma_addr_t *dma_addr)
881 {
882 	struct erdma_user_dbrecords_page *page = NULL;
883 	int rv = 0;
884 
885 	mutex_lock(&ctx->dbrecords_page_mutex);
886 
887 	list_for_each_entry(page, &ctx->dbrecords_page_list, list)
888 		if (page->va == (dbrecords_va & PAGE_MASK))
889 			goto found;
890 
891 	page = kmalloc(sizeof(*page), GFP_KERNEL);
892 	if (!page) {
893 		rv = -ENOMEM;
894 		goto out;
895 	}
896 
897 	page->va = (dbrecords_va & PAGE_MASK);
898 	page->refcnt = 0;
899 
900 	page->umem = ib_umem_get(ctx->ibucontext.device,
901 				 dbrecords_va & PAGE_MASK, PAGE_SIZE, 0);
902 	if (IS_ERR(page->umem)) {
903 		rv = PTR_ERR(page->umem);
904 		kfree(page);
905 		goto out;
906 	}
907 
908 	list_add(&page->list, &ctx->dbrecords_page_list);
909 
910 found:
911 	*dma_addr = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
912 		    (dbrecords_va & ~PAGE_MASK);
913 	*dbr_page = page;
914 	page->refcnt++;
915 
916 out:
917 	mutex_unlock(&ctx->dbrecords_page_mutex);
918 	return rv;
919 }
920 
921 static void
922 erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
923 			   struct erdma_user_dbrecords_page **dbr_page)
924 {
925 	if (!ctx || !(*dbr_page))
926 		return;
927 
928 	mutex_lock(&ctx->dbrecords_page_mutex);
929 	if (--(*dbr_page)->refcnt == 0) {
930 		list_del(&(*dbr_page)->list);
931 		ib_umem_release((*dbr_page)->umem);
932 		kfree(*dbr_page);
933 	}
934 
935 	*dbr_page = NULL;
936 	mutex_unlock(&ctx->dbrecords_page_mutex);
937 }
938 
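/*
 * Map a user QP. The queue buffer at @va holds the SQ first and the RQ at a
 * hardware-page aligned offset, each described by its own MTT; the SQ and RQ
 * doorbell records live back to back at @dbrec_va.
 */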
939 static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
940 			u64 va, u32 len, u64 dbrec_va)
941 {
942 	dma_addr_t dbrec_dma;
943 	u32 rq_offset;
944 	int ret;
945 
946 	if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) +
947 		   qp->attrs.rq_size * RQE_SIZE))
948 		return -EINVAL;
949 
950 	ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mem, va,
951 			      qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
952 			      (SZ_1M - SZ_4K), true);
953 	if (ret)
954 		return ret;
955 
956 	rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
957 	qp->user_qp.rq_offset = rq_offset;
958 
959 	ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mem, va + rq_offset,
960 			      qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
961 			      (SZ_1M - SZ_4K), true);
962 	if (ret)
963 		goto put_sq_mtt;
964 
965 	ret = erdma_map_user_dbrecords(uctx, dbrec_va,
966 				       &qp->user_qp.user_dbr_page,
967 				       &dbrec_dma);
968 	if (ret)
969 		goto put_rq_mtt;
970 
971 	qp->user_qp.sq_dbrec_dma = dbrec_dma;
972 	qp->user_qp.rq_dbrec_dma = dbrec_dma + ERDMA_DB_SIZE;
973 
974 	return 0;
975 
976 put_rq_mtt:
977 	put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
978 
979 put_sq_mtt:
980 	put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
981 
982 	return ret;
983 }
984 
985 static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
986 {
987 	put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
988 	put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
989 	erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
990 }
991 
992 int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
993 		    struct ib_udata *udata)
994 {
995 	struct erdma_qp *qp = to_eqp(ibqp);
996 	struct erdma_dev *dev = to_edev(ibqp->device);
997 	struct erdma_ucontext *uctx = rdma_udata_to_drv_context(
998 		udata, struct erdma_ucontext, ibucontext);
999 	struct erdma_ureq_create_qp ureq;
1000 	struct erdma_uresp_create_qp uresp;
1001 	void *old_entry;
1002 	int ret = 0;
1003 
1004 	ret = erdma_qp_validate_cap(dev, attrs);
1005 	if (ret)
1006 		goto err_out;
1007 
1008 	ret = erdma_qp_validate_attr(dev, attrs);
1009 	if (ret)
1010 		goto err_out;
1011 
1012 	qp->scq = to_ecq(attrs->send_cq);
1013 	qp->rcq = to_ecq(attrs->recv_cq);
1014 	qp->dev = dev;
1015 	qp->attrs.cc = dev->attrs.cc;
1016 
1017 	init_rwsem(&qp->state_lock);
1018 	kref_init(&qp->ref);
1019 	init_completion(&qp->safe_free);
1020 
1021 	if (qp->ibqp.qp_type == IB_QPT_GSI) {
1022 		old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
1023 		if (xa_is_err(old_entry))
1024 			ret = xa_err(old_entry);
1025 		else
1026 			qp->ibqp.qp_num = 1;
1027 	} else {
1028 		ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
1029 				      XA_LIMIT(1, dev->attrs.max_qp - 1),
1030 				      &dev->next_alloc_qpn, GFP_KERNEL);
1031 	}
1032 
1033 	if (ret < 0) {
1034 		ret = -ENOMEM;
1035 		goto err_out;
1036 	}
1037 
1038 	qp->attrs.sq_size = roundup_pow_of_two(attrs->cap.max_send_wr *
1039 					       ERDMA_MAX_WQEBB_PER_SQE);
1040 	qp->attrs.rq_size = roundup_pow_of_two(attrs->cap.max_recv_wr);
1041 
1042 	if (uctx) {
1043 		ret = ib_copy_from_udata(&ureq, udata,
1044 					 min(sizeof(ureq), udata->inlen));
1045 		if (ret)
1046 			goto err_out_xa;
1047 
1048 		ret = init_user_qp(qp, uctx, ureq.qbuf_va, ureq.qbuf_len,
1049 				   ureq.db_record_va);
1050 		if (ret)
1051 			goto err_out_xa;
1052 
1053 		memset(&uresp, 0, sizeof(uresp));
1054 
1055 		uresp.num_sqe = qp->attrs.sq_size;
1056 		uresp.num_rqe = qp->attrs.rq_size;
1057 		uresp.qp_id = QP_ID(qp);
1058 		uresp.rq_offset = qp->user_qp.rq_offset;
1059 
1060 		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1061 		if (ret)
1062 			goto err_out_cmd;
1063 	} else {
1064 		ret = init_kernel_qp(dev, qp, attrs);
1065 		if (ret)
1066 			goto err_out_xa;
1067 	}
1068 
1069 	qp->attrs.max_send_sge = attrs->cap.max_send_sge;
1070 	qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;
1071 
1072 	if (erdma_device_iwarp(qp->dev))
1073 		qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
1074 	else
1075 		qp->attrs.rocev2.state = ERDMA_QPS_ROCEV2_RESET;
1076 
1077 	INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker);
1078 
1079 	ret = create_qp_cmd(uctx, qp);
1080 	if (ret)
1081 		goto err_out_cmd;
1082 
1083 	spin_lock_init(&qp->lock);
1084 
1085 	return 0;
1086 
1087 err_out_cmd:
1088 	if (uctx)
1089 		free_user_qp(qp, uctx);
1090 	else
1091 		free_kernel_qp(qp);
1092 err_out_xa:
1093 	xa_erase(&dev->qp_xa, QP_ID(qp));
1094 err_out:
1095 	return ret;
1096 }
1097 
1098 static int erdma_create_stag(struct erdma_dev *dev, u32 *stag)
1099 {
1100 	int stag_idx;
1101 
1102 	stag_idx = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX]);
1103 	if (stag_idx < 0)
1104 		return stag_idx;
1105 
1106 	/* For now, we always keep the key field zero. */
1107 	*stag = (stag_idx << 8);
1108 
1109 	return 0;
1110 }
1111 
1112 struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int acc)
1113 {
1114 	struct erdma_dev *dev = to_edev(ibpd->device);
1115 	struct erdma_mr *mr;
1116 	u32 stag;
1117 	int ret;
1118 
1119 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1120 	if (!mr)
1121 		return ERR_PTR(-ENOMEM);
1122 
1123 	ret = erdma_create_stag(dev, &stag);
1124 	if (ret)
1125 		goto out_free;
1126 
1127 	mr->type = ERDMA_MR_TYPE_DMA;
1128 
1129 	mr->ibmr.lkey = stag;
1130 	mr->ibmr.rkey = stag;
1131 	mr->ibmr.pd = ibpd;
1132 	mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(acc);
1133 	ret = regmr_cmd(dev, mr);
1134 	if (ret)
1135 		goto out_remove_stag;
1136 
1137 	return &mr->ibmr;
1138 
1139 out_remove_stag:
1140 	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
1141 		       mr->ibmr.lkey >> 8);
1142 
1143 out_free:
1144 	kfree(mr);
1145 
1146 	return ERR_PTR(ret);
1147 }
1148 
1149 struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
1150 				u32 max_num_sg)
1151 {
1152 	struct erdma_mr *mr;
1153 	struct erdma_dev *dev = to_edev(ibpd->device);
1154 	int ret;
1155 	u32 stag;
1156 
1157 	if (mr_type != IB_MR_TYPE_MEM_REG)
1158 		return ERR_PTR(-EOPNOTSUPP);
1159 
1160 	if (max_num_sg > ERDMA_MR_MAX_MTT_CNT)
1161 		return ERR_PTR(-EINVAL);
1162 
1163 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1164 	if (!mr)
1165 		return ERR_PTR(-ENOMEM);
1166 
1167 	ret = erdma_create_stag(dev, &stag);
1168 	if (ret)
1169 		goto out_free;
1170 
1171 	mr->type = ERDMA_MR_TYPE_FRMR;
1172 
1173 	mr->ibmr.lkey = stag;
1174 	mr->ibmr.rkey = stag;
1175 	mr->ibmr.pd = ibpd;
1176 	/* Access rights are updated when the FRMR is used. */
1177 	mr->access = ERDMA_MR_ACC_LR | ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR |
1178 		     ERDMA_MR_ACC_RW;
1179 
1180 	mr->mem.page_size = PAGE_SIZE; /* update it later. */
1181 	mr->mem.page_cnt = max_num_sg;
1182 	mr->mem.mtt = erdma_create_mtt(dev, MTT_SIZE(max_num_sg), true);
1183 	if (IS_ERR(mr->mem.mtt)) {
1184 		ret = PTR_ERR(mr->mem.mtt);
1185 		goto out_remove_stag;
1186 	}
1187 
1188 	ret = regmr_cmd(dev, mr);
1189 	if (ret)
1190 		goto out_destroy_mtt;
1191 
1192 	return &mr->ibmr;
1193 
1194 out_destroy_mtt:
1195 	erdma_destroy_mtt(dev, mr->mem.mtt);
1196 
1197 out_remove_stag:
1198 	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
1199 		       mr->ibmr.lkey >> 8);
1200 
1201 out_free:
1202 	kfree(mr);
1203 
1204 	return ERR_PTR(ret);
1205 }
1206 
1207 static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
1208 {
1209 	struct erdma_mr *mr = to_emr(ibmr);
1210 
1211 	if (mr->mem.mtt_nents >= mr->mem.page_cnt)
1212 		return -1;
1213 
1214 	mr->mem.mtt->buf[mr->mem.mtt_nents] = addr;
1215 	mr->mem.mtt_nents++;
1216 
1217 	return 0;
1218 }
1219 
1220 int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
1221 		    unsigned int *sg_offset)
1222 {
1223 	struct erdma_mr *mr = to_emr(ibmr);
1224 	int num;
1225 
1226 	mr->mem.mtt_nents = 0;
1227 
1228 	num = ib_sg_to_pages(&mr->ibmr, sg, sg_nents, sg_offset,
1229 			     erdma_set_page);
1230 
1231 	return num;
1232 }
1233 
1234 struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1235 				u64 virt, int access, struct ib_dmah *dmah,
1236 				struct ib_udata *udata)
1237 {
1238 	struct erdma_mr *mr = NULL;
1239 	struct erdma_dev *dev = to_edev(ibpd->device);
1240 	u32 stag;
1241 	int ret;
1242 
1243 	if (dmah)
1244 		return ERR_PTR(-EOPNOTSUPP);
1245 
1246 	if (!len || len > dev->attrs.max_mr_size)
1247 		return ERR_PTR(-EINVAL);
1248 
1249 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1250 	if (!mr)
1251 		return ERR_PTR(-ENOMEM);
1252 
1253 	ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
1254 			      SZ_2G - SZ_4K, false);
1255 	if (ret)
1256 		goto err_out_free;
1257 
1258 	ret = erdma_create_stag(dev, &stag);
1259 	if (ret)
1260 		goto err_out_put_mtt;
1261 
1262 	mr->ibmr.lkey = mr->ibmr.rkey = stag;
1263 	mr->ibmr.pd = ibpd;
1264 	mr->mem.va = virt;
1265 	mr->mem.len = len;
1266 	mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(access);
1267 	mr->valid = 1;
1268 	mr->type = ERDMA_MR_TYPE_NORMAL;
1269 
1270 	ret = regmr_cmd(dev, mr);
1271 	if (ret)
1272 		goto err_out_mr;
1273 
1274 	return &mr->ibmr;
1275 
1276 err_out_mr:
1277 	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
1278 		       mr->ibmr.lkey >> 8);
1279 
1280 err_out_put_mtt:
1281 	put_mtt_entries(dev, &mr->mem);
1282 
1283 err_out_free:
1284 	kfree(mr);
1285 
1286 	return ERR_PTR(ret);
1287 }
1288 
1289 int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1290 {
1291 	struct erdma_mr *mr;
1292 	struct erdma_dev *dev = to_edev(ibmr->device);
1293 	struct erdma_cmdq_dereg_mr_req req;
1294 	int ret;
1295 
1296 	mr = to_emr(ibmr);
1297 
1298 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
1299 				CMDQ_OPCODE_DEREG_MR);
1300 
1301 	req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
1302 		  FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
1303 
1304 	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
1305 				  true);
1306 	if (ret)
1307 		return ret;
1308 
1309 	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], ibmr->lkey >> 8);
1310 
1311 	put_mtt_entries(dev, &mr->mem);
1312 
1313 	kfree(mr);
1314 	return 0;
1315 }
1316 
1317 int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1318 {
1319 	struct erdma_cq *cq = to_ecq(ibcq);
1320 	struct erdma_dev *dev = to_edev(ibcq->device);
1321 	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
1322 		udata, struct erdma_ucontext, ibucontext);
1323 	int err;
1324 	struct erdma_cmdq_destroy_cq_req req;
1325 
1326 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
1327 				CMDQ_OPCODE_DESTROY_CQ);
1328 	req.cqn = cq->cqn;
1329 
1330 	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
1331 				  true);
1332 	if (err)
1333 		return err;
1334 
1335 	if (rdma_is_kernel_res(&cq->ibcq.res)) {
1336 		dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
1337 				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
1338 		dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
1339 			      cq->kern_cq.dbrec_dma);
1340 	} else {
1341 		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
1342 		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
1343 	}
1344 
1345 	xa_erase(&dev->cq_xa, cq->cqn);
1346 
1347 	return 0;
1348 }
1349 
1350 int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1351 {
1352 	struct erdma_qp *qp = to_eqp(ibqp);
1353 	struct erdma_dev *dev = to_edev(ibqp->device);
1354 	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
1355 		udata, struct erdma_ucontext, ibucontext);
1356 	struct erdma_cmdq_destroy_qp_req req;
1357 	union erdma_mod_qp_params params;
1358 	int err;
1359 
1360 	down_write(&qp->state_lock);
1361 	if (erdma_device_iwarp(dev)) {
1362 		params.iwarp.state = ERDMA_QPS_IWARP_ERROR;
1363 		erdma_modify_qp_state_iwarp(qp, &params.iwarp,
1364 					    ERDMA_QPA_IWARP_STATE);
1365 	} else {
1366 		params.rocev2.state = ERDMA_QPS_ROCEV2_ERROR;
1367 		erdma_modify_qp_state_rocev2(qp, &params.rocev2,
1368 					     ERDMA_QPA_ROCEV2_STATE);
1369 	}
1370 	up_write(&qp->state_lock);
1371 
1372 	cancel_delayed_work_sync(&qp->reflush_dwork);
1373 
1374 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
1375 				CMDQ_OPCODE_DESTROY_QP);
1376 	req.qpn = QP_ID(qp);
1377 
1378 	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
1379 				  true);
1380 	if (err)
1381 		return err;
1382 
1383 	erdma_qp_put(qp);
1384 	wait_for_completion(&qp->safe_free);
1385 
1386 	if (rdma_is_kernel_res(&qp->ibqp.res)) {
1387 		free_kernel_qp(qp);
1388 	} else {
1389 		put_mtt_entries(dev, &qp->user_qp.sq_mem);
1390 		put_mtt_entries(dev, &qp->user_qp.rq_mem);
1391 		erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
1392 	}
1393 
1394 	if (qp->cep)
1395 		erdma_cep_put(qp->cep);
1396 	xa_erase(&dev->qp_xa, QP_ID(qp));
1397 
1398 	return 0;
1399 }
1400 
1401 void erdma_qp_get_ref(struct ib_qp *ibqp)
1402 {
1403 	erdma_qp_get(to_eqp(ibqp));
1404 }
1405 
1406 void erdma_qp_put_ref(struct ib_qp *ibqp)
1407 {
1408 	erdma_qp_put(to_eqp(ibqp));
1409 }
1410 
1411 int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
1412 {
1413 	struct rdma_user_mmap_entry *rdma_entry;
1414 	struct erdma_user_mmap_entry *entry;
1415 	pgprot_t prot;
1416 	int err;
1417 
1418 	rdma_entry = rdma_user_mmap_entry_get(ctx, vma);
1419 	if (!rdma_entry)
1420 		return -EINVAL;
1421 
1422 	entry = to_emmap(rdma_entry);
1423 
1424 	switch (entry->mmap_flag) {
1425 	case ERDMA_MMAP_IO_NC:
1426 		/* map doorbell. */
1427 		prot = pgprot_device(vma->vm_page_prot);
1428 		break;
1429 	default:
1430 		err = -EINVAL;
1431 		goto put_entry;
1432 	}
1433 
1434 	err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
1435 				prot, rdma_entry);
1436 
1437 put_entry:
1438 	rdma_user_mmap_entry_put(rdma_entry);
1439 	return err;
1440 }
1441 
1442 void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
1443 {
1444 	struct erdma_user_mmap_entry *entry = to_emmap(rdma_entry);
1445 
1446 	kfree(entry);
1447 }
1448 
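/*
 * Set up the doorbell addresses of a new user context. Without extended
 * doorbell support the fixed BAR offsets are used directly; otherwise an
 * ALLOC_DB command asks the device for per-context SQ/RQ/CQ doorbell page
 * offsets within the function BAR.
 */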
1449 static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
1450 			      bool ext_db_en)
1451 {
1452 	struct erdma_cmdq_ext_db_req req = {};
1453 	u64 val0, val1;
1454 	int ret;
1455 
1456 	/*
1457 	 * CAP_SYS_RAWIO is required if the hardware does not support the
1458 	 * extended doorbell mechanism.
1459 	 */
1460 	if (!ext_db_en && !capable(CAP_SYS_RAWIO))
1461 		return -EPERM;
1462 
1463 	if (!ext_db_en) {
1464 		ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET;
1465 		ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
1466 		ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
1467 		return 0;
1468 	}
1469 
1470 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
1471 				CMDQ_OPCODE_ALLOC_DB);
1472 
1473 	req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
1474 		  FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
1475 		  FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
1476 
1477 	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1,
1478 				  true);
1479 	if (ret)
1480 		return ret;
1481 
1482 	ctx->ext_db.enable = true;
1483 	ctx->ext_db.sdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_SDB);
1484 	ctx->ext_db.rdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_RDB);
1485 	ctx->ext_db.cdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_CDB);
1486 
1487 	ctx->sdb = dev->func_bar_addr + (ctx->ext_db.sdb_off << PAGE_SHIFT);
1488 	ctx->rdb = dev->func_bar_addr + (ctx->ext_db.rdb_off << PAGE_SHIFT);
1489 	ctx->cdb = dev->func_bar_addr + (ctx->ext_db.cdb_off << PAGE_SHIFT);
1490 
1491 	return 0;
1492 }
1493 
1494 static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx)
1495 {
1496 	struct erdma_cmdq_ext_db_req req = {};
1497 	int ret;
1498 
1499 	if (!ctx->ext_db.enable)
1500 		return;
1501 
1502 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
1503 				CMDQ_OPCODE_FREE_DB);
1504 
1505 	req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
1506 		  FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
1507 		  FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
1508 
1509 	req.sdb_off = ctx->ext_db.sdb_off;
1510 	req.rdb_off = ctx->ext_db.rdb_off;
1511 	req.cdb_off = ctx->ext_db.cdb_off;
1512 
1513 	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
1514 				  true);
1515 	if (ret)
1516 		ibdev_err_ratelimited(&dev->ibdev,
1517 				      "free db resources failed %d", ret);
1518 }
1519 
1520 static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx)
1521 {
1522 	rdma_user_mmap_entry_remove(uctx->sq_db_mmap_entry);
1523 	rdma_user_mmap_entry_remove(uctx->rq_db_mmap_entry);
1524 	rdma_user_mmap_entry_remove(uctx->cq_db_mmap_entry);
1525 }
1526 
1527 int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
1528 {
1529 	struct erdma_ucontext *ctx = to_ectx(ibctx);
1530 	struct erdma_dev *dev = to_edev(ibctx->device);
1531 	int ret;
1532 	struct erdma_uresp_alloc_ctx uresp = {};
1533 
1534 	if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) {
1535 		ret = -ENOMEM;
1536 		goto err_out;
1537 	}
1538 
1539 	if (udata->outlen < sizeof(uresp)) {
1540 		ret = -EINVAL;
1541 		goto err_out;
1542 	}
1543 
1544 	INIT_LIST_HEAD(&ctx->dbrecords_page_list);
1545 	mutex_init(&ctx->dbrecords_page_mutex);
1546 
1547 	ret = alloc_db_resources(dev, ctx,
1548 				 !!(dev->attrs.cap_flags &
1549 				    ERDMA_DEV_CAP_FLAGS_EXTEND_DB));
1550 	if (ret)
1551 		goto err_out;
1552 
1553 	ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
1554 		ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
1555 	if (!ctx->sq_db_mmap_entry) {
1556 		ret = -ENOMEM;
1557 		goto err_free_ext_db;
1558 	}
1559 
1560 	ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert(
1561 		ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
1562 	if (!ctx->rq_db_mmap_entry) {
1563 		ret = -EINVAL;
1564 		goto err_put_mmap_entries;
1565 	}
1566 
1567 	ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert(
1568 		ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
1569 	if (!ctx->cq_db_mmap_entry) {
1570 		ret = -EINVAL;
1571 		goto err_put_mmap_entries;
1572 	}
1573 
1574 	uresp.dev_id = dev->pdev->device;
1575 
1576 	ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1577 	if (ret)
1578 		goto err_put_mmap_entries;
1579 
1580 	return 0;
1581 
1582 err_put_mmap_entries:
1583 	erdma_uctx_user_mmap_entries_remove(ctx);
1584 
1585 err_free_ext_db:
1586 	free_db_resources(dev, ctx);
1587 
1588 err_out:
1589 	atomic_dec(&dev->num_ctx);
1590 	return ret;
1591 }
1592 
1593 void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
1594 {
1595 	struct erdma_dev *dev = to_edev(ibctx->device);
1596 	struct erdma_ucontext *ctx = to_ectx(ibctx);
1597 
1598 	erdma_uctx_user_mmap_entries_remove(ctx);
1599 	free_db_resources(dev, ctx);
1600 	atomic_dec(&dev->num_ctx);
1601 }
1602 
1603 static void erdma_attr_to_av(const struct rdma_ah_attr *ah_attr,
1604 			     struct erdma_av *av, u16 sport)
1605 {
1606 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
1607 
1608 	av->port = rdma_ah_get_port_num(ah_attr);
1609 	av->sgid_index = grh->sgid_index;
1610 	av->hop_limit = grh->hop_limit;
1611 	av->traffic_class = grh->traffic_class;
1612 	av->sl = rdma_ah_get_sl(ah_attr);
1613 
1614 	av->flow_label = grh->flow_label;
1615 	av->udp_sport = sport;
1616 
1617 	ether_addr_copy(av->dmac, ah_attr->roce.dmac);
1618 	memcpy(av->dgid, grh->dgid.raw, ERDMA_ROCEV2_GID_SIZE);
1619 
1620 	if (ipv6_addr_v4mapped((struct in6_addr *)&grh->dgid))
1621 		av->ntype = ERDMA_NETWORK_TYPE_IPV4;
1622 	else
1623 		av->ntype = ERDMA_NETWORK_TYPE_IPV6;
1624 }
1625 
1626 static void erdma_av_to_attr(struct erdma_av *av, struct rdma_ah_attr *ah_attr)
1627 {
1628 	ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
1629 
1630 	rdma_ah_set_sl(ah_attr, av->sl);
1631 	rdma_ah_set_port_num(ah_attr, av->port);
1632 	rdma_ah_set_ah_flags(ah_attr, IB_AH_GRH);
1633 
1634 	rdma_ah_set_grh(ah_attr, NULL, av->flow_label, av->sgid_index,
1635 			av->hop_limit, av->traffic_class);
1636 	rdma_ah_set_dgid_raw(ah_attr, av->dgid);
1637 }
1638 
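/* Mappings between IB QP states and the per-protocol device QP states. */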
1639 static int ib_qps_to_erdma_qps[ERDMA_PROTO_COUNT][IB_QPS_ERR + 1] = {
1640 	[ERDMA_PROTO_IWARP] = {
1641 		[IB_QPS_RESET] = ERDMA_QPS_IWARP_IDLE,
1642 		[IB_QPS_INIT] = ERDMA_QPS_IWARP_IDLE,
1643 		[IB_QPS_RTR] = ERDMA_QPS_IWARP_RTR,
1644 		[IB_QPS_RTS] = ERDMA_QPS_IWARP_RTS,
1645 		[IB_QPS_SQD] = ERDMA_QPS_IWARP_CLOSING,
1646 		[IB_QPS_SQE] = ERDMA_QPS_IWARP_TERMINATE,
1647 		[IB_QPS_ERR] = ERDMA_QPS_IWARP_ERROR,
1648 	},
1649 	[ERDMA_PROTO_ROCEV2] = {
1650 		[IB_QPS_RESET] = ERDMA_QPS_ROCEV2_RESET,
1651 		[IB_QPS_INIT] = ERDMA_QPS_ROCEV2_INIT,
1652 		[IB_QPS_RTR] = ERDMA_QPS_ROCEV2_RTR,
1653 		[IB_QPS_RTS] = ERDMA_QPS_ROCEV2_RTS,
1654 		[IB_QPS_SQD] = ERDMA_QPS_ROCEV2_SQD,
1655 		[IB_QPS_SQE] = ERDMA_QPS_ROCEV2_SQE,
1656 		[IB_QPS_ERR] = ERDMA_QPS_ROCEV2_ERROR,
1657 	},
1658 };
1659 
1660 static int erdma_qps_to_ib_qps[ERDMA_PROTO_COUNT][ERDMA_QPS_ROCEV2_COUNT] = {
1661 	[ERDMA_PROTO_IWARP] = {
1662 		[ERDMA_QPS_IWARP_IDLE] = IB_QPS_INIT,
1663 		[ERDMA_QPS_IWARP_RTR] = IB_QPS_RTR,
1664 		[ERDMA_QPS_IWARP_RTS] = IB_QPS_RTS,
1665 		[ERDMA_QPS_IWARP_CLOSING] = IB_QPS_ERR,
1666 		[ERDMA_QPS_IWARP_TERMINATE] = IB_QPS_ERR,
1667 		[ERDMA_QPS_IWARP_ERROR] = IB_QPS_ERR,
1668 	},
1669 	[ERDMA_PROTO_ROCEV2] = {
1670 		[ERDMA_QPS_ROCEV2_RESET] = IB_QPS_RESET,
1671 		[ERDMA_QPS_ROCEV2_INIT] = IB_QPS_INIT,
1672 		[ERDMA_QPS_ROCEV2_RTR] = IB_QPS_RTR,
1673 		[ERDMA_QPS_ROCEV2_RTS] = IB_QPS_RTS,
1674 		[ERDMA_QPS_ROCEV2_SQD] = IB_QPS_SQD,
1675 		[ERDMA_QPS_ROCEV2_SQE] = IB_QPS_SQE,
1676 		[ERDMA_QPS_ROCEV2_ERROR] = IB_QPS_ERR,
1677 	},
1678 };
1679 
1680 static inline enum erdma_qps_iwarp ib_to_iwarp_qps(enum ib_qp_state state)
1681 {
1682 	return ib_qps_to_erdma_qps[ERDMA_PROTO_IWARP][state];
1683 }
1684 
1685 static inline enum erdma_qps_rocev2 ib_to_rocev2_qps(enum ib_qp_state state)
1686 {
1687 	return ib_qps_to_erdma_qps[ERDMA_PROTO_ROCEV2][state];
1688 }
1689 
1690 static inline enum ib_qp_state iwarp_to_ib_qps(enum erdma_qps_iwarp state)
1691 {
1692 	return erdma_qps_to_ib_qps[ERDMA_PROTO_IWARP][state];
1693 }
1694 
1695 static inline enum ib_qp_state rocev2_to_ib_qps(enum erdma_qps_rocev2 state)
1696 {
1697 	return erdma_qps_to_ib_qps[ERDMA_PROTO_ROCEV2][state];
1698 }
1699 
1700 static int erdma_check_qp_attrs(struct erdma_qp *qp, struct ib_qp_attr *attr,
1701 				int attr_mask)
1702 {
1703 	enum ib_qp_state cur_state, nxt_state;
1704 	struct erdma_dev *dev = qp->dev;
1705 	int ret = -EINVAL;
1706 
1707 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) {
1708 		ret = -EOPNOTSUPP;
1709 		goto out;
1710 	}
1711 
1712 	if ((attr_mask & IB_QP_PORT) &&
1713 	    !rdma_is_port_valid(&dev->ibdev, attr->port_num))
1714 		goto out;
1715 
1716 	if (erdma_device_rocev2(dev)) {
1717 		cur_state = (attr_mask & IB_QP_CUR_STATE) ?
1718 				    attr->cur_qp_state :
1719 				    rocev2_to_ib_qps(qp->attrs.rocev2.state);
1720 
1721 		nxt_state = (attr_mask & IB_QP_STATE) ? attr->qp_state :
1722 							cur_state;
1723 
1724 		if (!ib_modify_qp_is_ok(cur_state, nxt_state, qp->ibqp.qp_type,
1725 					attr_mask))
1726 			goto out;
1727 
1728 		if ((attr_mask & IB_QP_AV) &&
1729 		    erdma_check_gid_attr(
1730 			    rdma_ah_read_grh(&attr->ah_attr)->sgid_attr))
1731 			goto out;
1732 
1733 		if ((attr_mask & IB_QP_PKEY_INDEX) &&
1734 		    attr->pkey_index >= ERDMA_MAX_PKEYS)
1735 			goto out;
1736 	}
1737 
1738 	return 0;
1739 
1740 out:
1741 	return ret;
1742 }
1743 
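/*
 * Convert the IB modify-QP attributes into RoCEv2 device QP parameters and
 * the matching erdma attribute mask.
 */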
1744 static void erdma_init_mod_qp_params_rocev2(
1745 	struct erdma_qp *qp, struct erdma_mod_qp_params_rocev2 *params,
1746 	int *erdma_attr_mask, struct ib_qp_attr *attr, int ib_attr_mask)
1747 {
1748 	enum erdma_qpa_mask_rocev2 to_modify_attrs = 0;
1749 	enum erdma_qps_rocev2 cur_state, nxt_state;
1750 	u16 udp_sport;
1751 
1752 	if (ib_attr_mask & IB_QP_CUR_STATE)
1753 		cur_state = ib_to_rocev2_qps(attr->cur_qp_state);
1754 	else
1755 		cur_state = qp->attrs.rocev2.state;
1756 
1757 	if (ib_attr_mask & IB_QP_STATE)
1758 		nxt_state = ib_to_rocev2_qps(attr->qp_state);
1759 	else
1760 		nxt_state = cur_state;
1761 
1762 	to_modify_attrs |= ERDMA_QPA_ROCEV2_STATE;
1763 	params->state = nxt_state;
1764 
1765 	if (ib_attr_mask & IB_QP_QKEY) {
1766 		to_modify_attrs |= ERDMA_QPA_ROCEV2_QKEY;
1767 		params->qkey = attr->qkey;
1768 	}
1769 
1770 	if (ib_attr_mask & IB_QP_SQ_PSN) {
1771 		to_modify_attrs |= ERDMA_QPA_ROCEV2_SQ_PSN;
1772 		params->sq_psn = attr->sq_psn;
1773 	}
1774 
1775 	if (ib_attr_mask & IB_QP_RQ_PSN) {
1776 		to_modify_attrs |= ERDMA_QPA_ROCEV2_RQ_PSN;
1777 		params->rq_psn = attr->rq_psn;
1778 	}
1779 
1780 	if (ib_attr_mask & IB_QP_DEST_QPN) {
1781 		to_modify_attrs |= ERDMA_QPA_ROCEV2_DST_QPN;
1782 		params->dst_qpn = attr->dest_qp_num;
1783 	}
1784 
1785 	if (ib_attr_mask & IB_QP_AV) {
1786 		to_modify_attrs |= ERDMA_QPA_ROCEV2_AV;
1787 		udp_sport = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
1788 					       QP_ID(qp), params->dst_qpn);
1789 		erdma_attr_to_av(&attr->ah_attr, &params->av, udp_sport);
1790 	}
1791 
1792 	*erdma_attr_mask = to_modify_attrs;
1793 }
1794 
1795 int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1796 		    struct ib_udata *udata)
1797 {
1798 	struct erdma_qp *qp = to_eqp(ibqp);
1799 	union erdma_mod_qp_params params;
1800 	int ret = 0, erdma_attr_mask = 0;
1801 
1802 	down_write(&qp->state_lock);
1803 
1804 	ret = erdma_check_qp_attrs(qp, attr, attr_mask);
1805 	if (ret)
1806 		goto out;
1807 
1808 	if (erdma_device_iwarp(qp->dev)) {
1809 		if (attr_mask & IB_QP_STATE) {
1810 			erdma_attr_mask |= ERDMA_QPA_IWARP_STATE;
1811 			params.iwarp.state = ib_to_iwarp_qps(attr->qp_state);
1812 		}
1813 
1814 		ret = erdma_modify_qp_state_iwarp(qp, &params.iwarp,
1815 						  erdma_attr_mask);
1816 	} else {
1817 		erdma_init_mod_qp_params_rocev2(
1818 			qp, &params.rocev2, &erdma_attr_mask, attr, attr_mask);
1819 
1820 		ret = erdma_modify_qp_state_rocev2(qp, &params.rocev2,
1821 						   erdma_attr_mask);
1822 	}
1823 
1824 out:
1825 	up_write(&qp->state_lock);
1826 	return ret;
1827 }
1828 
1829 static enum ib_qp_state query_qp_state(struct erdma_qp *qp)
1830 {
1831 	if (erdma_device_iwarp(qp->dev))
1832 		return iwarp_to_ib_qps(qp->attrs.iwarp.state);
1833 	else
1834 		return rocev2_to_ib_qps(qp->attrs.rocev2.state);
1835 }
1836 
1837 int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
1838 		   int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1839 {
1840 	struct erdma_cmdq_query_qp_req_rocev2 req;
1841 	struct erdma_dev *dev;
1842 	struct erdma_qp *qp;
1843 	u64 resp0, resp1;
1844 	int ret;
1845 
1846 	if (ibqp && qp_attr && qp_init_attr) {
1847 		qp = to_eqp(ibqp);
1848 		dev = to_edev(ibqp->device);
1849 	} else {
1850 		return -EINVAL;
1851 	}
1852 
1853 	qp_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
1854 	qp_init_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
1855 
1856 	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
1857 	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
1858 	qp_attr->cap.max_send_sge = qp->attrs.max_send_sge;
1859 	qp_attr->cap.max_recv_sge = qp->attrs.max_recv_sge;
1860 
1861 	qp_attr->path_mtu = ib_mtu_int_to_enum(dev->netdev->mtu);
1862 	qp_attr->max_rd_atomic = qp->attrs.irq_size;
1863 	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
1864 
1865 	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
1866 				   IB_ACCESS_REMOTE_WRITE |
1867 				   IB_ACCESS_REMOTE_READ;
1868 
1869 	qp_init_attr->cap = qp_attr->cap;
1870 
1871 	if (erdma_device_rocev2(dev)) {
1872 		/* Query the hardware for the PSNs, QP state and SQ draining status. */
1873 		erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
1874 					CMDQ_OPCODE_QUERY_QP);
1875 		req.qpn = QP_ID(qp);
1876 
1877 		ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
1878 					  &resp1, true);
1879 		if (ret)
1880 			return ret;
1881 
1882 		qp_attr->sq_psn =
1883 			FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK, resp0);
1884 		qp_attr->rq_psn =
1885 			FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK, resp0);
1886 		qp_attr->qp_state = rocev2_to_ib_qps(FIELD_GET(
1887 			ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK, resp0));
1888 		qp_attr->cur_qp_state = qp_attr->qp_state;
1889 		qp_attr->sq_draining = FIELD_GET(
1890 			ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK, resp0);
1891 
1892 		qp_attr->pkey_index = 0;
1893 		qp_attr->dest_qp_num = qp->attrs.rocev2.dst_qpn;
1894 
1895 		if (qp->ibqp.qp_type == IB_QPT_RC)
1896 			erdma_av_to_attr(&qp->attrs.rocev2.av,
1897 					 &qp_attr->ah_attr);
1898 	} else {
1899 		qp_attr->qp_state = query_qp_state(qp);
1900 		qp_attr->cur_qp_state = qp_attr->qp_state;
1901 	}
1902 
1903 	return 0;
1904 }
1905 
1906 static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
1907 			      struct erdma_ureq_create_cq *ureq)
1908 {
1909 	int ret;
1910 	struct erdma_dev *dev = to_edev(cq->ibcq.device);
1911 
1912 	ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mem, ureq->qbuf_va,
1913 			      ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
1914 			      true);
1915 	if (ret)
1916 		return ret;
1917 
1918 	ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
1919 				       &cq->user_cq.user_dbr_page,
1920 				       &cq->user_cq.dbrec_dma);
1921 	if (ret)
1922 		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
1923 
1924 	return ret;
1925 }
1926 
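/*
 * Set up a kernel CQ: allocate a DMA-coherent queue buffer and a doorbell
 * record from the device's doorbell pool, and point the CQ at the default
 * CQ doorbell address in the function BAR.
 */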
1927 static int erdma_init_kernel_cq(struct erdma_cq *cq)
1928 {
1929 	struct erdma_dev *dev = to_edev(cq->ibcq.device);
1930 
1931 	cq->kern_cq.qbuf =
1932 		dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
1933 				   &cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
1934 	if (!cq->kern_cq.qbuf)
1935 		return -ENOMEM;
1936 
1937 	cq->kern_cq.dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
1938 					    &cq->kern_cq.dbrec_dma);
1939 	if (!cq->kern_cq.dbrec)
1940 		goto err_out;
1941 
1942 	spin_lock_init(&cq->kern_cq.lock);
1943 	/* Use the default CQ doorbell address within the function BAR. */
1944 	cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;
1945 
1946 	return 0;
1947 
1948 err_out:
1949 	dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
1950 			  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
1951 
1952 	return -ENOMEM;
1953 }
1954 
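/*
 * Create a completion queue.  The requested depth is rounded up to a power
 * of two (e.g. a request for 1000 CQEs yields a 1024-entry queue) and the
 * CQ is bound to EQN comp_vector + 1.  A CQN is allocated cyclically from
 * the CQ xarray, the queue buffer and doorbell record are set up for either
 * user or kernel use, and finally the CREATE_CQ command is posted to the
 * device.
 */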
1955 int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1956 		    struct uverbs_attr_bundle *attrs)
1957 {
1958 	struct ib_udata *udata = &attrs->driver_udata;
1959 	struct erdma_cq *cq = to_ecq(ibcq);
1960 	struct erdma_dev *dev = to_edev(ibcq->device);
1961 	unsigned int depth = attr->cqe;
1962 	int ret;
1963 	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
1964 		udata, struct erdma_ucontext, ibucontext);
1965 
1966 	if (depth > dev->attrs.max_cqe)
1967 		return -EINVAL;
1968 
1969 	depth = roundup_pow_of_two(depth);
1970 	cq->ibcq.cqe = depth;
1971 	cq->depth = depth;
1972 	cq->assoc_eqn = attr->comp_vector + 1;
1973 
1974 	ret = xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq,
1975 			      XA_LIMIT(1, dev->attrs.max_cq - 1),
1976 			      &dev->next_alloc_cqn, GFP_KERNEL);
1977 	if (ret < 0)
1978 		return ret;
1979 
1980 	if (!rdma_is_kernel_res(&ibcq->res)) {
1981 		struct erdma_ureq_create_cq ureq;
1982 		struct erdma_uresp_create_cq uresp;
1983 
1984 		ret = ib_copy_from_udata(&ureq, udata,
1985 					 min(udata->inlen, sizeof(ureq)));
1986 		if (ret)
1987 			goto err_out_xa;
1988 
1989 		ret = erdma_init_user_cq(ctx, cq, &ureq);
1990 		if (ret)
1991 			goto err_out_xa;
1992 
1993 		uresp.cq_id = cq->cqn;
1994 		uresp.num_cqe = depth;
1995 
1996 		ret = ib_copy_to_udata(udata, &uresp,
1997 				       min(sizeof(uresp), udata->outlen));
1998 		if (ret)
1999 			goto err_free_res;
2000 	} else {
2001 		ret = erdma_init_kernel_cq(cq);
2002 		if (ret)
2003 			goto err_out_xa;
2004 	}
2005 
2006 	ret = create_cq_cmd(ctx, cq);
2007 	if (ret)
2008 		goto err_free_res;
2009 
2010 	return 0;
2011 
2012 err_free_res:
2013 	if (!rdma_is_kernel_res(&ibcq->res)) {
2014 		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
2015 		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
2016 	} else {
2017 		dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT,
2018 				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
2019 		dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
2020 			      cq->kern_cq.dbrec_dma);
2021 	}
2022 
2023 err_out_xa:
2024 	xa_erase(&dev->cq_xa, cq->cqn);
2025 
2026 	return ret;
2027 }
2028 
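/*
 * Called by the uverbs core when the device is disassociated from its user
 * contexts; erdma has no per-context state to tear down here, so this is
 * intentionally a no-op.
 */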
2029 void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext)
2030 {
2031 }
2032 
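/* Inform the device of an MTU change via the CONF_MTU command. */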
2033 void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
2034 {
2035 	struct erdma_cmdq_config_mtu_req req;
2036 
2037 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
2038 				CMDQ_OPCODE_CONF_MTU);
2039 	req.mtu = mtu;
2040 
2041 	erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, true);
2042 }
2043 
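/*
 * Dispatch a port event (e.g. IB_EVENT_PORT_ACTIVE) on port 1 to the
 * IB core.
 */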
2044 void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
2045 {
2046 	struct ib_event event;
2047 
2048 	event.device = &dev->ibdev;
2049 	event.element.port_num = 1;
2050 	event.event = reason;
2051 
2052 	ib_dispatch_event(&event);
2053 }
2054 
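/*
 * Hardware counter indices.  The order must match the counter layout of
 * struct erdma_cmdq_query_stats_resp, since the query path copies
 * num_counters u64 values starting at tx_req_cnt.
 */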
2055 enum counters {
2056 	ERDMA_STATS_TX_REQS_CNT,
2057 	ERDMA_STATS_TX_PACKETS_CNT,
2058 	ERDMA_STATS_TX_BYTES_CNT,
2059 	ERDMA_STATS_TX_DISABLE_DROP_CNT,
2060 	ERDMA_STATS_TX_BPS_METER_DROP_CNT,
2061 	ERDMA_STATS_TX_PPS_METER_DROP_CNT,
2062 
2063 	ERDMA_STATS_RX_PACKETS_CNT,
2064 	ERDMA_STATS_RX_BYTES_CNT,
2065 	ERDMA_STATS_RX_DISABLE_DROP_CNT,
2066 	ERDMA_STATS_RX_BPS_METER_DROP_CNT,
2067 	ERDMA_STATS_RX_PPS_METER_DROP_CNT,
2068 
2069 	ERDMA_STATS_MAX
2070 };
2071 
2072 static const struct rdma_stat_desc erdma_descs[] = {
2073 	[ERDMA_STATS_TX_REQS_CNT].name = "tx_reqs_cnt",
2074 	[ERDMA_STATS_TX_PACKETS_CNT].name = "tx_packets_cnt",
2075 	[ERDMA_STATS_TX_BYTES_CNT].name = "tx_bytes_cnt",
2076 	[ERDMA_STATS_TX_DISABLE_DROP_CNT].name = "tx_disable_drop_cnt",
2077 	[ERDMA_STATS_TX_BPS_METER_DROP_CNT].name = "tx_bps_limit_drop_cnt",
2078 	[ERDMA_STATS_TX_PPS_METER_DROP_CNT].name = "tx_pps_limit_drop_cnt",
2079 	[ERDMA_STATS_RX_PACKETS_CNT].name = "rx_packets_cnt",
2080 	[ERDMA_STATS_RX_BYTES_CNT].name = "rx_bytes_cnt",
2081 	[ERDMA_STATS_RX_DISABLE_DROP_CNT].name = "rx_disable_drop_cnt",
2082 	[ERDMA_STATS_RX_BPS_METER_DROP_CNT].name = "rx_bps_limit_drop_cnt",
2083 	[ERDMA_STATS_RX_PPS_METER_DROP_CNT].name = "rx_pps_limit_drop_cnt",
2084 };
2085 
2086 struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
2087 						u32 port_num)
2088 {
2089 	return rdma_alloc_hw_stats_struct(erdma_descs, ERDMA_STATS_MAX,
2090 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
2091 }
2092 
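/*
 * Fetch the hardware counters: allocate a DMA-able response buffer, post a
 * GET_STATS command that the device fills in, validate the response magic
 * and copy the counter values into the rdma_hw_stats structure.
 */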
2093 static int erdma_query_hw_stats(struct erdma_dev *dev,
2094 				struct rdma_hw_stats *stats)
2095 {
2096 	struct erdma_cmdq_query_stats_resp *resp;
2097 	struct erdma_cmdq_query_req req;
2098 	dma_addr_t dma_addr;
2099 	int err;
2100 
2101 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
2102 				CMDQ_OPCODE_GET_STATS);
2103 
2104 	resp = dma_pool_zalloc(dev->resp_pool, GFP_KERNEL, &dma_addr);
2105 	if (!resp)
2106 		return -ENOMEM;
2107 
2108 	req.target_addr = dma_addr;
2109 	req.target_length = ERDMA_HW_RESP_SIZE;
2110 
2111 	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
2112 				  true);
2113 	if (err)
2114 		goto out;
2115 
2116 	if (resp->hdr.magic != ERDMA_HW_RESP_MAGIC) {
2117 		err = -EINVAL;
2118 		goto out;
2119 	}
2120 
2121 	memcpy(&stats->value[0], &resp->tx_req_cnt,
2122 	       sizeof(u64) * stats->num_counters);
2123 
2124 out:
2125 	dma_pool_free(dev->resp_pool, resp, dma_addr);
2126 
2127 	return err;
2128 }
2129 
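/*
 * Read the hardware counters for a port into the stats structure.  Returns
 * the number of counters filled in on success; a port 0 (device-level)
 * query fills in nothing and returns 0.
 */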
2130 int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
2131 		       u32 port, int index)
2132 {
2133 	struct erdma_dev *dev = to_edev(ibdev);
2134 	int ret;
2135 
2136 	if (port == 0)
2137 		return 0;
2138 
2139 	ret = erdma_query_hw_stats(dev, stats);
2140 	if (ret)
2141 		return ret;
2142 
2143 	return stats->num_counters;
2144 }
2145 
2146 enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev, u32 port_num)
2147 {
2148 	return IB_LINK_LAYER_ETHERNET;
2149 }
2150 
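/*
 * Program a GID table entry.  For an add operation the network type is
 * derived from the GID (IPv4 for v4-mapped addresses, IPv6 otherwise) and
 * the GID itself is copied into the command; a delete only carries the
 * index.
 */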
2151 static int erdma_set_gid(struct erdma_dev *dev, u8 op, u32 idx,
2152 			 const union ib_gid *gid)
2153 {
2154 	struct erdma_cmdq_set_gid_req req;
2155 	u8 ntype;
2156 
2157 	req.cfg = FIELD_PREP(ERDMA_CMD_SET_GID_SGID_IDX_MASK, idx) |
2158 		  FIELD_PREP(ERDMA_CMD_SET_GID_OP_MASK, op);
2159 
2160 	if (op == ERDMA_SET_GID_OP_ADD) {
2161 		if (ipv6_addr_v4mapped((struct in6_addr *)gid))
2162 			ntype = ERDMA_NETWORK_TYPE_IPV4;
2163 		else
2164 			ntype = ERDMA_NETWORK_TYPE_IPV6;
2165 
2166 		req.cfg |= FIELD_PREP(ERDMA_CMD_SET_GID_NTYPE_MASK, ntype);
2167 
2168 		memcpy(&req.gid, gid, ERDMA_ROCEV2_GID_SIZE);
2169 	}
2170 
2171 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
2172 				CMDQ_OPCODE_SET_GID);
2173 	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
2174 				   true);
2175 }
2176 
2177 int erdma_add_gid(const struct ib_gid_attr *attr, void **context)
2178 {
2179 	struct erdma_dev *dev = to_edev(attr->device);
2180 	int ret;
2181 
2182 	ret = erdma_check_gid_attr(attr);
2183 	if (ret)
2184 		return ret;
2185 
2186 	return erdma_set_gid(dev, ERDMA_SET_GID_OP_ADD, attr->index,
2187 			     &attr->gid);
2188 }
2189 
2190 int erdma_del_gid(const struct ib_gid_attr *attr, void **context)
2191 {
2192 	return erdma_set_gid(to_edev(attr->device), ERDMA_SET_GID_OP_DEL,
2193 			     attr->index, NULL);
2194 }
2195 
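/* Only the default pkey is supported. */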
2196 int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
2197 {
2198 	if (index >= ERDMA_MAX_PKEYS)
2199 		return -EINVAL;
2200 
2201 	*pkey = ERDMA_DEFAULT_PKEY;
2202 	return 0;
2203 }
2204 
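/* Pack an address vector into the command-queue AV layout. */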
2205 void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av)
2206 {
2207 	av_cfg->cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_AV_FL_MASK, av->flow_label) |
2208 		       FIELD_PREP(ERDMA_CMD_CREATE_AV_NTYPE_MASK, av->ntype);
2209 
2210 	av_cfg->traffic_class = av->traffic_class;
2211 	av_cfg->hop_limit = av->hop_limit;
2212 	av_cfg->sl = av->sl;
2213 
2214 	av_cfg->udp_sport = av->udp_sport;
2215 	av_cfg->sgid_index = av->sgid_index;
2216 
2217 	ether_addr_copy(av_cfg->dmac, av->dmac);
2218 	memcpy(av_cfg->dgid, av->dgid, ERDMA_ROCEV2_GID_SIZE);
2219 }
2220 
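/*
 * Create an address handle: allocate an AH number from the AH resource
 * bitmap, pick a UDP source port (derived from the flow label when one is
 * given, otherwise from the AH number), translate the AH attributes into
 * an erdma AV and post the CREATE_AH command.  The AH number is released
 * if the command fails.
 */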
2221 int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2222 		    struct ib_udata *udata)
2223 {
2224 	const struct ib_global_route *grh =
2225 		rdma_ah_read_grh(init_attr->ah_attr);
2226 	struct erdma_dev *dev = to_edev(ibah->device);
2227 	struct erdma_pd *pd = to_epd(ibah->pd);
2228 	struct erdma_ah *ah = to_eah(ibah);
2229 	struct erdma_cmdq_create_ah_req req;
2230 	u32 udp_sport;
2231 	int ret;
2232 
2233 	ret = erdma_check_gid_attr(grh->sgid_attr);
2234 	if (ret)
2235 		return ret;
2236 
2237 	ret = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_AH]);
2238 	if (ret < 0)
2239 		return ret;
2240 
2241 	ah->ahn = ret;
2242 
2243 	if (grh->flow_label)
2244 		udp_sport = rdma_flow_label_to_udp_sport(grh->flow_label);
2245 	else
2246 		udp_sport =
2247 			IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + (ah->ahn & 0x3FFF);
2248 
2249 	erdma_attr_to_av(init_attr->ah_attr, &ah->av, udp_sport);
2250 
2251 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
2252 				CMDQ_OPCODE_CREATE_AH);
2253 
2254 	req.pdn = pd->pdn;
2255 	req.ahn = ah->ahn;
2256 	erdma_set_av_cfg(&req.av_cfg, &ah->av);
2257 
2258 	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
2259 				  init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
2260 	if (ret) {
2261 		erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
2262 		return ret;
2263 	}
2264 
2265 	return 0;
2266 }
2267 
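/*
 * Destroy an address handle: post the DESTROY_AH command and, only after
 * it succeeds, release the AH number back to the resource bitmap.
 */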
2268 int erdma_destroy_ah(struct ib_ah *ibah, u32 flags)
2269 {
2270 	struct erdma_dev *dev = to_edev(ibah->device);
2271 	struct erdma_pd *pd = to_epd(ibah->pd);
2272 	struct erdma_ah *ah = to_eah(ibah);
2273 	struct erdma_cmdq_destroy_ah_req req;
2274 	int ret;
2275 
2276 	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
2277 				CMDQ_OPCODE_DESTROY_AH);
2278 
2279 	req.pdn = pd->pdn;
2280 	req.ahn = ah->ahn;
2281 
2282 	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
2283 				  flags & RDMA_DESTROY_AH_SLEEPABLE);
2284 	if (ret)
2285 		return ret;
2286 
2287 	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
2288 
2289 	return 0;
2290 }
2291 
2292 int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
2293 {
2294 	struct erdma_ah *ah = to_eah(ibah);
2295 
2296 	memset(ah_attr, 0, sizeof(*ah_attr));
2297 	erdma_av_to_attr(&ah->av, ah_attr);
2298 
2299 	return 0;
2300 }
2301