xref: /linux/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /* This file is part of the Emulex RoCE Device Driver for
2  * RoCE (RDMA over Converged Ethernet) adapters.
3  * Copyright (C) 2012-2015 Emulex. All rights reserved.
4  * EMULEX and SLI are trademarks of Emulex.
5  * www.emulex.com
6  *
7  * This software is available to you under a choice of one of two licenses.
8  * You may choose to be licensed under the terms of the GNU General Public
9  * License (GPL) Version 2, available from the file COPYING in the main
10  * directory of this source tree, or the BSD license below:
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  *
16  * - Redistributions of source code must retain the above copyright notice,
17  *   this list of conditions and the following disclaimer.
18  *
19  * - Redistributions in binary form must reproduce the above copyright
20  *   notice, this list of conditions and the following disclaimer in
21  *   the documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * Contact Information:
36  * linux-drivers@emulex.com
37  *
38  * Emulex
39  * 3333 Susan Street
40  * Costa Mesa, CA 92626
41  */
42 
43 #include <linux/dma-mapping.h>
44 #include <rdma/ib_verbs.h>
45 #include <rdma/ib_user_verbs.h>
46 #include <rdma/iw_cm.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_cache.h>
50 
51 #include "ocrdma.h"
52 #include "ocrdma_hw.h"
53 #include "ocrdma_verbs.h"
54 #include "ocrdma_abi.h"
55 
56 int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
57 {
58 	if (index > 1)
59 		return -EINVAL;
60 
61 	*pkey = 0xffff;
62 	return 0;
63 }
64 
65 int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
66 		     int index, union ib_gid *sgid)
67 {
68 	int ret;
69 	struct ocrdma_dev *dev;
70 
71 	dev = get_ocrdma_dev(ibdev);
72 	memset(sgid, 0, sizeof(*sgid));
73 	if (index >= OCRDMA_MAX_SGID)
74 		return -EINVAL;
75 
76 	ret = ib_get_cached_gid(ibdev, port, index, sgid);
77 	if (ret == -EAGAIN) {
78 		memcpy(sgid, &zgid, sizeof(*sgid));
79 		return 0;
80 	}
81 
82 	return ret;
83 }
84 
85 int ocrdma_add_gid(struct ib_device *device,
86 		   u8 port_num,
87 		   unsigned int index,
88 		   const union ib_gid *gid,
89 		   const struct ib_gid_attr *attr,
90 		   void **context) {
91 	return 0;
92 }
93 
94 int ocrdma_del_gid(struct ib_device *device,
95 		   u8 port_num,
96 		   unsigned int index,
97 		   void **context) {
98 	return 0;
99 }
100 
101 int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
102 			struct ib_udata *uhw)
103 {
104 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
105 
106 	if (uhw->inlen || uhw->outlen)
107 		return -EINVAL;
108 
109 	memset(attr, 0, sizeof *attr);
110 	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
111 	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
112 	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
113 	attr->max_mr_size = dev->attr.max_mr_size;
114 	attr->page_size_cap = 0xffff000;
115 	attr->vendor_id = dev->nic_info.pdev->vendor;
116 	attr->vendor_part_id = dev->nic_info.pdev->device;
117 	attr->hw_ver = dev->asic_id;
118 	attr->max_qp = dev->attr.max_qp;
119 	attr->max_ah = OCRDMA_MAX_AH;
120 	attr->max_qp_wr = dev->attr.max_wqe;
121 
122 	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
123 					IB_DEVICE_RC_RNR_NAK_GEN |
124 					IB_DEVICE_SHUTDOWN_PORT |
125 					IB_DEVICE_SYS_IMAGE_GUID |
126 					IB_DEVICE_LOCAL_DMA_LKEY |
127 					IB_DEVICE_MEM_MGT_EXTENSIONS;
128 	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
129 	attr->max_sge_rd = 0;
130 	attr->max_cq = dev->attr.max_cq;
131 	attr->max_cqe = dev->attr.max_cqe;
132 	attr->max_mr = dev->attr.max_mr;
133 	attr->max_mw = dev->attr.max_mw;
134 	attr->max_pd = dev->attr.max_pd;
135 	attr->atomic_cap = 0;
136 	attr->max_fmr = 0;
137 	attr->max_map_per_fmr = 0;
138 	attr->max_qp_rd_atom =
139 	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
140 	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
141 	attr->max_srq = dev->attr.max_srq;
142 	attr->max_srq_sge = dev->attr.max_srq_sge;
143 	attr->max_srq_wr = dev->attr.max_rqe;
144 	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
145 	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
146 	attr->max_pkeys = 1;
147 	return 0;
148 }
149 
150 struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
151 {
152 	struct ocrdma_dev *dev;
153 	struct net_device *ndev = NULL;
154 
155 	rcu_read_lock();
156 
157 	dev = get_ocrdma_dev(ibdev);
158 	if (dev)
159 		ndev = dev->nic_info.netdev;
160 	if (ndev)
161 		dev_hold(ndev);
162 
163 	rcu_read_unlock();
164 
165 	return ndev;
166 }
167 
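/* Translate the PHY link speed reported by firmware into the closest
 * IB speed/width pair; unknown speeds fall back to SDR x1.
 */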
168 static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
169 					    u8 *ib_speed, u8 *ib_width)
170 {
171 	int status;
172 	u8 speed;
173 
174 	status = ocrdma_mbx_get_link_speed(dev, &speed);
175 	if (status)
176 		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
177 
178 	switch (speed) {
179 	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
180 		*ib_speed = IB_SPEED_SDR;
181 		*ib_width = IB_WIDTH_1X;
182 		break;
183 
184 	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
185 		*ib_speed = IB_SPEED_QDR;
186 		*ib_width = IB_WIDTH_1X;
187 		break;
188 
189 	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
190 		*ib_speed = IB_SPEED_DDR;
191 		*ib_width = IB_WIDTH_4X;
192 		break;
193 
194 	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
195 		*ib_speed = IB_SPEED_QDR;
196 		*ib_width = IB_WIDTH_4X;
197 		break;
198 
199 	default:
200 		/* Unsupported */
201 		*ib_speed = IB_SPEED_SDR;
202 		*ib_width = IB_WIDTH_1X;
203 	}
204 }
205 
206 int ocrdma_query_port(struct ib_device *ibdev,
207 		      u8 port, struct ib_port_attr *props)
208 {
209 	enum ib_port_state port_state;
210 	struct ocrdma_dev *dev;
211 	struct net_device *netdev;
212 
213 	dev = get_ocrdma_dev(ibdev);
214 	if (port > 1) {
215 		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
216 		       dev->id, port);
217 		return -EINVAL;
218 	}
219 	netdev = dev->nic_info.netdev;
220 	if (netif_running(netdev) && netif_oper_up(netdev)) {
221 		port_state = IB_PORT_ACTIVE;
222 		props->phys_state = 5;
223 	} else {
224 		port_state = IB_PORT_DOWN;
225 		props->phys_state = 3;
226 	}
227 	props->max_mtu = IB_MTU_4096;
228 	props->active_mtu = iboe_get_mtu(netdev->mtu);
229 	props->lid = 0;
230 	props->lmc = 0;
231 	props->sm_lid = 0;
232 	props->sm_sl = 0;
233 	props->state = port_state;
234 	props->port_cap_flags =
235 	    IB_PORT_CM_SUP |
236 	    IB_PORT_REINIT_SUP |
237 	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
238 	    IB_PORT_IP_BASED_GIDS;
239 	props->gid_tbl_len = OCRDMA_MAX_SGID;
240 	props->pkey_tbl_len = 1;
241 	props->bad_pkey_cntr = 0;
242 	props->qkey_viol_cntr = 0;
243 	get_link_speed_and_width(dev, &props->active_speed,
244 				 &props->active_width);
245 	props->max_msg_sz = 0x80000000;
246 	props->max_vl_num = 4;
247 	return 0;
248 }
249 
250 int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
251 		       struct ib_port_modify *props)
252 {
253 	struct ocrdma_dev *dev;
254 
255 	dev = get_ocrdma_dev(ibdev);
256 	if (port > 1) {
257 		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
258 		return -EINVAL;
259 	}
260 	return 0;
261 }
262 
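/* Record a physical address range exported to user space so that
 * ocrdma_mmap() can later validate a requested mapping against it.
 */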
263 static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
264 			   unsigned long len)
265 {
266 	struct ocrdma_mm *mm;
267 
268 	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
269 	if (mm == NULL)
270 		return -ENOMEM;
271 	mm->key.phy_addr = phy_addr;
272 	mm->key.len = len;
273 	INIT_LIST_HEAD(&mm->entry);
274 
275 	mutex_lock(&uctx->mm_list_lock);
276 	list_add_tail(&mm->entry, &uctx->mm_head);
277 	mutex_unlock(&uctx->mm_list_lock);
278 	return 0;
279 }
280 
281 static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
282 			    unsigned long len)
283 {
284 	struct ocrdma_mm *mm, *tmp;
285 
286 	mutex_lock(&uctx->mm_list_lock);
287 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
288 		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
289 			continue;
290 
291 		list_del(&mm->entry);
292 		kfree(mm);
293 		break;
294 	}
295 	mutex_unlock(&uctx->mm_list_lock);
296 }
297 
298 static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
299 			      unsigned long len)
300 {
301 	bool found = false;
302 	struct ocrdma_mm *mm;
303 
304 	mutex_lock(&uctx->mm_list_lock);
305 	list_for_each_entry(mm, &uctx->mm_head, entry) {
306 		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
307 			continue;
308 
309 		found = true;
310 		break;
311 	}
312 	mutex_unlock(&uctx->mm_list_lock);
313 	return found;
314 }
315 
316 
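/* Reserve a PD number from the preallocated DPP or normal PD bitmap and
 * update the usage counters; called with dev->dev_lock held.
 */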
317 static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
318 {
319 	u16 pd_bitmap_idx = 0;
320 	const unsigned long *pd_bitmap;
321 
322 	if (dpp_pool) {
323 		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
324 		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
325 						    dev->pd_mgr->max_dpp_pd);
326 		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
327 		dev->pd_mgr->pd_dpp_count++;
328 		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
329 			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
330 	} else {
331 		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
332 		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
333 						    dev->pd_mgr->max_normal_pd);
334 		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
335 		dev->pd_mgr->pd_norm_count++;
336 		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
337 			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
338 	}
339 	return pd_bitmap_idx;
340 }
341 
342 static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
343 					bool dpp_pool)
344 {
345 	u16 pd_count;
346 	u16 pd_bit_index;
347 
348 	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
349 			      dev->pd_mgr->pd_norm_count;
350 	if (pd_count == 0)
351 		return -EINVAL;
352 
353 	if (dpp_pool) {
354 		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
355 		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
356 			return -EINVAL;
357 		} else {
358 			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
359 			dev->pd_mgr->pd_dpp_count--;
360 		}
361 	} else {
362 		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
363 		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
364 			return -EINVAL;
365 		} else {
366 			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
367 			dev->pd_mgr->pd_norm_count--;
368 		}
369 	}
370 
371 	return 0;
372 }
373 
374 static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
375 				   bool dpp_pool)
376 {
377 	int status;
378 
379 	mutex_lock(&dev->dev_lock);
380 	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
381 	mutex_unlock(&dev->dev_lock);
382 	return status;
383 }
384 
385 static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
386 {
387 	u16 pd_idx = 0;
388 	int status = 0;
389 
390 	mutex_lock(&dev->dev_lock);
391 	if (pd->dpp_enabled) {
392 		/* try allocating a DPP PD; if unavailable, fall back to a normal PD */
393 		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
394 			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
395 			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
396 			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
397 		} else if (dev->pd_mgr->pd_norm_count <
398 			   dev->pd_mgr->max_normal_pd) {
399 			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
400 			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
401 			pd->dpp_enabled = false;
402 		} else {
403 			status = -EINVAL;
404 		}
405 	} else {
406 		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
407 			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
408 			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
409 		} else {
410 			status = -EINVAL;
411 		}
412 	}
413 	mutex_unlock(&dev->dev_lock);
414 	return status;
415 }
416 
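/* Allocate a protection domain. For user contexts on SKH-R adapters a
 * DPP capable PD is attempted first; if the preallocated PD pool is
 * valid, the PD number is taken from it, otherwise a mailbox command is
 * issued, retrying without DPP on failure.
 */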
417 static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
418 					  struct ocrdma_ucontext *uctx,
419 					  struct ib_udata *udata)
420 {
421 	struct ocrdma_pd *pd = NULL;
422 	int status = 0;
423 
424 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
425 	if (!pd)
426 		return ERR_PTR(-ENOMEM);
427 
428 	if (udata && uctx && dev->attr.max_dpp_pds) {
429 		pd->dpp_enabled =
430 			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
431 		pd->num_dpp_qp =
432 			pd->dpp_enabled ? (dev->nic_info.db_page_size /
433 					   dev->attr.wqe_size) : 0;
434 	}
435 
436 	if (dev->pd_mgr->pd_prealloc_valid) {
437 		status = ocrdma_get_pd_num(dev, pd);
438 		if (status == 0) {
439 			return pd;
440 		} else {
441 			kfree(pd);
442 			return ERR_PTR(status);
443 		}
444 	}
445 
446 retry:
447 	status = ocrdma_mbx_alloc_pd(dev, pd);
448 	if (status) {
449 		if (pd->dpp_enabled) {
450 			pd->dpp_enabled = false;
451 			pd->num_dpp_qp = 0;
452 			goto retry;
453 		} else {
454 			kfree(pd);
455 			return ERR_PTR(status);
456 		}
457 	}
458 
459 	return pd;
460 }
461 
462 static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
463 				 struct ocrdma_pd *pd)
464 {
465 	return (uctx->cntxt_pd == pd ? true : false);
466 }
467 
468 static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
469 			      struct ocrdma_pd *pd)
470 {
471 	int status = 0;
472 
473 	if (dev->pd_mgr->pd_prealloc_valid)
474 		status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
475 	else
476 		status = ocrdma_mbx_dealloc_pd(dev, pd);
477 
478 	kfree(pd);
479 	return status;
480 }
481 
482 static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
483 				    struct ocrdma_ucontext *uctx,
484 				    struct ib_udata *udata)
485 {
486 	int status = 0;
487 
488 	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
489 	if (IS_ERR(uctx->cntxt_pd)) {
490 		status = PTR_ERR(uctx->cntxt_pd);
491 		uctx->cntxt_pd = NULL;
492 		goto err;
493 	}
494 
495 	uctx->cntxt_pd->uctx = uctx;
496 	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
497 err:
498 	return status;
499 }
500 
501 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
502 {
503 	struct ocrdma_pd *pd = uctx->cntxt_pd;
504 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
505 
506 	if (uctx->pd_in_use) {
507 		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
508 		       __func__, dev->id, pd->id);
509 	}
510 	uctx->cntxt_pd = NULL;
511 	(void)_ocrdma_dealloc_pd(dev, pd);
512 	return 0;
513 }
514 
515 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
516 {
517 	struct ocrdma_pd *pd = NULL;
518 
519 	mutex_lock(&uctx->mm_list_lock);
520 	if (!uctx->pd_in_use) {
521 		uctx->pd_in_use = true;
522 		pd = uctx->cntxt_pd;
523 	}
524 	mutex_unlock(&uctx->mm_list_lock);
525 
526 	return pd;
527 }
528 
529 static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
530 {
531 	mutex_lock(&uctx->mm_list_lock);
532 	uctx->pd_in_use = false;
533 	mutex_unlock(&uctx->mm_list_lock);
534 }
535 
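/* Allocate a user context: set up the DMA-coherent AH table shared with
 * user space, create the per-context PD and return the mapping details
 * in the ucontext response.
 */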
536 struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
537 					  struct ib_udata *udata)
538 {
539 	int status;
540 	struct ocrdma_ucontext *ctx;
541 	struct ocrdma_alloc_ucontext_resp resp;
542 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
543 	struct pci_dev *pdev = dev->nic_info.pdev;
544 	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
545 
546 	if (!udata)
547 		return ERR_PTR(-EFAULT);
548 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
549 	if (!ctx)
550 		return ERR_PTR(-ENOMEM);
551 	INIT_LIST_HEAD(&ctx->mm_head);
552 	mutex_init(&ctx->mm_list_lock);
553 
554 	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
555 					    &ctx->ah_tbl.pa, GFP_KERNEL);
556 	if (!ctx->ah_tbl.va) {
557 		kfree(ctx);
558 		return ERR_PTR(-ENOMEM);
559 	}
560 	memset(ctx->ah_tbl.va, 0, map_len);
561 	ctx->ah_tbl.len = map_len;
562 
563 	memset(&resp, 0, sizeof(resp));
564 	resp.ah_tbl_len = ctx->ah_tbl.len;
565 	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
566 
567 	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
568 	if (status)
569 		goto map_err;
570 
571 	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
572 	if (status)
573 		goto pd_err;
574 
575 	resp.dev_id = dev->id;
576 	resp.max_inline_data = dev->attr.max_inline_data;
577 	resp.wqe_size = dev->attr.wqe_size;
578 	resp.rqe_size = dev->attr.rqe_size;
579 	resp.dpp_wqe_size = dev->attr.wqe_size;
580 
581 	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
582 	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
583 	if (status)
584 		goto cpy_err;
585 	return &ctx->ibucontext;
586 
587 cpy_err:
588 pd_err:
589 	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
590 map_err:
591 	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
592 			  ctx->ah_tbl.pa);
593 	kfree(ctx);
594 	return ERR_PTR(status);
595 }
596 
597 int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
598 {
599 	int status = 0;
600 	struct ocrdma_mm *mm, *tmp;
601 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
602 	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
603 	struct pci_dev *pdev = dev->nic_info.pdev;
604 
605 	status = ocrdma_dealloc_ucontext_pd(uctx);
606 
607 	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
608 	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
609 			  uctx->ah_tbl.pa);
610 
611 	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
612 		list_del(&mm->entry);
613 		kfree(mm);
614 	}
615 	kfree(uctx);
616 	return status;
617 }
618 
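/* Map doorbell, DPP or queue memory into user space. The offset must
 * match a range previously registered via ocrdma_add_mmap(); doorbell
 * pages are mapped uncached, DPP regions write-combined and queue
 * memory with normal cacheability.
 */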
619 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
620 {
621 	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
622 	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
623 	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
624 	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
625 	unsigned long len = (vma->vm_end - vma->vm_start);
626 	int status = 0;
627 	bool found;
628 
629 	if (vma->vm_start & (PAGE_SIZE - 1))
630 		return -EINVAL;
631 	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
632 	if (!found)
633 		return -EINVAL;
634 
635 	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
636 		dev->nic_info.db_total_size)) &&
637 		(len <=	dev->nic_info.db_page_size)) {
638 		if (vma->vm_flags & VM_READ)
639 			return -EPERM;
640 
641 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
642 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
643 					    len, vma->vm_page_prot);
644 	} else if (dev->nic_info.dpp_unmapped_len &&
645 		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
646 		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
647 			dev->nic_info.dpp_unmapped_len)) &&
648 		(len <= dev->nic_info.dpp_unmapped_len)) {
649 		if (vma->vm_flags & VM_READ)
650 			return -EPERM;
651 
652 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
653 		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
654 					    len, vma->vm_page_prot);
655 	} else {
656 		status = remap_pfn_range(vma, vma->vm_start,
657 					 vma->vm_pgoff, len, vma->vm_page_prot);
658 	}
659 	return status;
660 }
661 
662 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
663 				struct ib_ucontext *ib_ctx,
664 				struct ib_udata *udata)
665 {
666 	int status;
667 	u64 db_page_addr;
668 	u64 dpp_page_addr = 0;
669 	u32 db_page_size;
670 	struct ocrdma_alloc_pd_uresp rsp;
671 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
672 
673 	memset(&rsp, 0, sizeof(rsp));
674 	rsp.id = pd->id;
675 	rsp.dpp_enabled = pd->dpp_enabled;
676 	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
677 	db_page_size = dev->nic_info.db_page_size;
678 
679 	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
680 	if (status)
681 		return status;
682 
683 	if (pd->dpp_enabled) {
684 		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
685 				(pd->id * PAGE_SIZE);
686 		status = ocrdma_add_mmap(uctx, dpp_page_addr,
687 				 PAGE_SIZE);
688 		if (status)
689 			goto dpp_map_err;
690 		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
691 		rsp.dpp_page_addr_lo = dpp_page_addr;
692 	}
693 
694 	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
695 	if (status)
696 		goto ucopy_err;
697 
698 	pd->uctx = uctx;
699 	return 0;
700 
701 ucopy_err:
702 	if (pd->dpp_enabled)
703 		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
704 dpp_map_err:
705 	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
706 	return status;
707 }
708 
709 struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
710 			      struct ib_ucontext *context,
711 			      struct ib_udata *udata)
712 {
713 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
714 	struct ocrdma_pd *pd;
715 	struct ocrdma_ucontext *uctx = NULL;
716 	int status;
717 	u8 is_uctx_pd = false;
718 
719 	if (udata && context) {
720 		uctx = get_ocrdma_ucontext(context);
721 		pd = ocrdma_get_ucontext_pd(uctx);
722 		if (pd) {
723 			is_uctx_pd = true;
724 			goto pd_mapping;
725 		}
726 	}
727 
728 	pd = _ocrdma_alloc_pd(dev, uctx, udata);
729 	if (IS_ERR(pd)) {
730 		status = PTR_ERR(pd);
731 		goto exit;
732 	}
733 
734 pd_mapping:
735 	if (udata && context) {
736 		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
737 		if (status)
738 			goto err;
739 	}
740 	return &pd->ibpd;
741 
742 err:
743 	if (is_uctx_pd) {
744 		ocrdma_release_ucontext_pd(uctx);
745 	} else {
746 		status = _ocrdma_dealloc_pd(dev, pd);
747 	}
748 exit:
749 	return ERR_PTR(status);
750 }
751 
752 int ocrdma_dealloc_pd(struct ib_pd *ibpd)
753 {
754 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
755 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
756 	struct ocrdma_ucontext *uctx = NULL;
757 	int status = 0;
758 	u64 usr_db;
759 
760 	uctx = pd->uctx;
761 	if (uctx) {
762 		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
763 			(pd->id * PAGE_SIZE);
764 		if (pd->dpp_enabled)
765 			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
766 		usr_db = ocrdma_get_db_addr(dev, pd->id);
767 		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
768 
769 		if (is_ucontext_pd(uctx, pd)) {
770 			ocrdma_release_ucontext_pd(uctx);
771 			return status;
772 		}
773 	}
774 	status = _ocrdma_dealloc_pd(dev, pd);
775 	return status;
776 }
777 
778 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
779 			    u32 pdid, int acc, u32 num_pbls, u32 addr_check)
780 {
781 	int status;
782 
783 	mr->hwmr.fr_mr = 0;
784 	mr->hwmr.local_rd = 1;
785 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
786 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
787 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
788 	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
789 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
790 	mr->hwmr.num_pbls = num_pbls;
791 
792 	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
793 	if (status)
794 		return status;
795 
796 	mr->ibmr.lkey = mr->hwmr.lkey;
797 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
798 		mr->ibmr.rkey = mr->hwmr.lkey;
799 	return 0;
800 }
801 
802 struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
803 {
804 	int status;
805 	struct ocrdma_mr *mr;
806 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
807 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
808 
809 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
810 		pr_err("%s err, invalid access rights\n", __func__);
811 		return ERR_PTR(-EINVAL);
812 	}
813 
814 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
815 	if (!mr)
816 		return ERR_PTR(-ENOMEM);
817 
818 	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
819 				   OCRDMA_ADDR_CHECK_DISABLE);
820 	if (status) {
821 		kfree(mr);
822 		return ERR_PTR(status);
823 	}
824 
825 	return &mr->ibmr;
826 }
827 
828 static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
829 				   struct ocrdma_hw_mr *mr)
830 {
831 	struct pci_dev *pdev = dev->nic_info.pdev;
832 	int i = 0;
833 
834 	if (mr->pbl_table) {
835 		for (i = 0; i < mr->num_pbls; i++) {
836 			if (!mr->pbl_table[i].va)
837 				continue;
838 			dma_free_coherent(&pdev->dev, mr->pbl_size,
839 					  mr->pbl_table[i].va,
840 					  mr->pbl_table[i].pa);
841 		}
842 		kfree(mr->pbl_table);
843 		mr->pbl_table = NULL;
844 	}
845 }
846 
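/* Choose the smallest PBL size (a power-of-two multiple of the minimum
 * hardware page size) that can hold the requested number of PBEs within
 * the device limit on PBLs per MR.
 */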
847 static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
848 			      u32 num_pbes)
849 {
850 	u32 num_pbls = 0;
851 	u32 idx = 0;
852 	int status = 0;
853 	u32 pbl_size;
854 
855 	do {
856 		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
857 		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
858 			status = -EFAULT;
859 			break;
860 		}
861 		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
862 		num_pbls = num_pbls / (pbl_size / sizeof(u64));
863 		idx++;
864 	} while (num_pbls >= dev->attr.max_num_mr_pbl);
865 
866 	mr->hwmr.num_pbes = num_pbes;
867 	mr->hwmr.num_pbls = num_pbls;
868 	mr->hwmr.pbl_size = pbl_size;
869 	return status;
870 }
871 
872 static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
873 {
874 	int status = 0;
875 	int i;
876 	u32 dma_len = mr->pbl_size;
877 	struct pci_dev *pdev = dev->nic_info.pdev;
878 	void *va;
879 	dma_addr_t pa;
880 
881 	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
882 				mr->num_pbls, GFP_KERNEL);
883 
884 	if (!mr->pbl_table)
885 		return -ENOMEM;
886 
887 	for (i = 0; i < mr->num_pbls; i++) {
888 		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
889 		if (!va) {
890 			ocrdma_free_mr_pbl_tbl(dev, mr);
891 			status = -ENOMEM;
892 			break;
893 		}
894 		memset(va, 0, dma_len);
895 		mr->pbl_table[i].va = va;
896 		mr->pbl_table[i].pa = pa;
897 	}
898 	return status;
899 }
900 
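/* Walk the umem scatterlist and write one little-endian PBE per page
 * into the PBL table, advancing to the next PBL when the current one
 * fills up.
 */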
901 static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
902 			    u32 num_pbes)
903 {
904 	struct ocrdma_pbe *pbe;
905 	struct scatterlist *sg;
906 	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
907 	struct ib_umem *umem = mr->umem;
908 	int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
909 
910 	if (!mr->hwmr.num_pbes)
911 		return;
912 
913 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
914 	pbe_cnt = 0;
915 
916 	shift = ilog2(umem->page_size);
917 
918 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
919 		pages = sg_dma_len(sg) >> shift;
920 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
921 			/* store the page address in pbe */
922 			pbe->pa_lo =
923 			    cpu_to_le32(sg_dma_address
924 					(sg) +
925 					(umem->page_size * pg_cnt));
926 			pbe->pa_hi =
927 			    cpu_to_le32(upper_32_bits
928 					((sg_dma_address
929 					  (sg) +
930 					  umem->page_size * pg_cnt)));
931 			pbe_cnt += 1;
932 			total_num_pbes += 1;
933 			pbe++;
934 
935 			/* if all pbes are built, return; the caller issues the mbx cmd. */
936 			if (total_num_pbes == num_pbes)
937 				return;
938 
939 			/* if the current pbl is full of pbes,
940 			 * move to the next pbl.
941 			 */
942 			if (pbe_cnt ==
943 				(mr->hwmr.pbl_size / sizeof(u64))) {
944 				pbl_tbl++;
945 				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
946 				pbe_cnt = 0;
947 			}
948 
949 		}
950 	}
951 }
952 
953 struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
954 				 u64 usr_addr, int acc, struct ib_udata *udata)
955 {
956 	int status = -ENOMEM;
957 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
958 	struct ocrdma_mr *mr;
959 	struct ocrdma_pd *pd;
960 	u32 num_pbes;
961 
962 	pd = get_ocrdma_pd(ibpd);
963 
964 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
965 		return ERR_PTR(-EINVAL);
966 
967 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
968 	if (!mr)
969 		return ERR_PTR(status);
970 	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
971 	if (IS_ERR(mr->umem)) {
972 		status = -EFAULT;
973 		goto umem_err;
974 	}
975 	num_pbes = ib_umem_page_count(mr->umem);
976 	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
977 	if (status)
978 		goto umem_err;
979 
980 	mr->hwmr.pbe_size = mr->umem->page_size;
981 	mr->hwmr.fbo = ib_umem_offset(mr->umem);
982 	mr->hwmr.va = usr_addr;
983 	mr->hwmr.len = len;
984 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
985 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
986 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
987 	mr->hwmr.local_rd = 1;
988 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
989 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
990 	if (status)
991 		goto umem_err;
992 	build_user_pbes(dev, mr, num_pbes);
993 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
994 	if (status)
995 		goto mbx_err;
996 	mr->ibmr.lkey = mr->hwmr.lkey;
997 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
998 		mr->ibmr.rkey = mr->hwmr.lkey;
999 
1000 	return &mr->ibmr;
1001 
1002 mbx_err:
1003 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
1004 umem_err:
1005 	kfree(mr);
1006 	return ERR_PTR(status);
1007 }
1008 
1009 int ocrdma_dereg_mr(struct ib_mr *ib_mr)
1010 {
1011 	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
1012 	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
1013 
1014 	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
1015 
1016 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
1017 
1018 	/* it could be user registered memory. */
1019 	if (mr->umem)
1020 		ib_umem_release(mr->umem);
1021 	kfree(mr);
1022 
1023 	/* Don't stop cleanup, in case FW is unresponsive */
1024 	if (dev->mqe_ctx.fw_error_state) {
1025 		pr_err("%s(%d) fw not responding.\n",
1026 		       __func__, dev->id);
1027 	}
1028 	return 0;
1029 }
1030 
1031 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1032 				struct ib_udata *udata,
1033 				struct ib_ucontext *ib_ctx)
1034 {
1035 	int status;
1036 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
1037 	struct ocrdma_create_cq_uresp uresp;
1038 
1039 	memset(&uresp, 0, sizeof(uresp));
1040 	uresp.cq_id = cq->id;
1041 	uresp.page_size = PAGE_ALIGN(cq->len);
1042 	uresp.num_pages = 1;
1043 	uresp.max_hw_cqe = cq->max_hw_cqe;
1044 	uresp.page_addr[0] = virt_to_phys(cq->va);
1045 	uresp.db_page_addr =  ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
1046 	uresp.db_page_size = dev->nic_info.db_page_size;
1047 	uresp.phase_change = cq->phase_change ? 1 : 0;
1048 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1049 	if (status) {
1050 		pr_err("%s(%d) copy error cqid=0x%x.\n",
1051 		       __func__, dev->id, cq->id);
1052 		goto err;
1053 	}
1054 	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
1055 	if (status)
1056 		goto err;
1057 	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
1058 	if (status) {
1059 		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
1060 		goto err;
1061 	}
1062 	cq->ucontext = uctx;
1063 err:
1064 	return status;
1065 }
1066 
1067 struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
1068 			       const struct ib_cq_init_attr *attr,
1069 			       struct ib_ucontext *ib_ctx,
1070 			       struct ib_udata *udata)
1071 {
1072 	int entries = attr->cqe;
1073 	struct ocrdma_cq *cq;
1074 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
1075 	struct ocrdma_ucontext *uctx = NULL;
1076 	u16 pd_id = 0;
1077 	int status;
1078 	struct ocrdma_create_cq_ureq ureq;
1079 
1080 	if (attr->flags)
1081 		return ERR_PTR(-EINVAL);
1082 
1083 	if (udata) {
1084 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1085 			return ERR_PTR(-EFAULT);
1086 	} else
1087 		ureq.dpp_cq = 0;
1088 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
1089 	if (!cq)
1090 		return ERR_PTR(-ENOMEM);
1091 
1092 	spin_lock_init(&cq->cq_lock);
1093 	spin_lock_init(&cq->comp_handler_lock);
1094 	INIT_LIST_HEAD(&cq->sq_head);
1095 	INIT_LIST_HEAD(&cq->rq_head);
1096 	cq->first_arm = true;
1097 
1098 	if (ib_ctx) {
1099 		uctx = get_ocrdma_ucontext(ib_ctx);
1100 		pd_id = uctx->cntxt_pd->id;
1101 	}
1102 
1103 	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
1104 	if (status) {
1105 		kfree(cq);
1106 		return ERR_PTR(status);
1107 	}
1108 	if (ib_ctx) {
1109 		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
1110 		if (status)
1111 			goto ctx_err;
1112 	}
1113 	cq->phase = OCRDMA_CQE_VALID;
1114 	dev->cq_tbl[cq->id] = cq;
1115 	return &cq->ibcq;
1116 
1117 ctx_err:
1118 	ocrdma_mbx_destroy_cq(dev, cq);
1119 	kfree(cq);
1120 	return ERR_PTR(status);
1121 }
1122 
1123 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
1124 		     struct ib_udata *udata)
1125 {
1126 	int status = 0;
1127 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1128 
1129 	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
1130 		status = -EINVAL;
1131 		return status;
1132 	}
1133 	ibcq->cqe = new_cnt;
1134 	return status;
1135 }
1136 
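/* Count the valid CQEs left in the CQ and ring the CQ doorbell for them
 * so no completions remain outstanding before the CQ is destroyed.
 */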
1137 static void ocrdma_flush_cq(struct ocrdma_cq *cq)
1138 {
1139 	int cqe_cnt;
1140 	int valid_count = 0;
1141 	unsigned long flags;
1142 
1143 	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
1144 	struct ocrdma_cqe *cqe = NULL;
1145 
1146 	cqe = cq->va;
1147 	cqe_cnt = cq->cqe_cnt;
1148 
1149 	/* The last irq might have scheduled a polling thread;
1150 	 * sync up with it before hard flushing.
1151 	 */
1152 	spin_lock_irqsave(&cq->cq_lock, flags);
1153 	while (cqe_cnt) {
1154 		if (is_cqe_valid(cq, cqe))
1155 			valid_count++;
1156 		cqe++;
1157 		cqe_cnt--;
1158 	}
1159 	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
1160 	spin_unlock_irqrestore(&cq->cq_lock, flags);
1161 }
1162 
1163 int ocrdma_destroy_cq(struct ib_cq *ibcq)
1164 {
1165 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1166 	struct ocrdma_eq *eq = NULL;
1167 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
1168 	int pdid = 0;
1169 	u32 irq, indx;
1170 
1171 	dev->cq_tbl[cq->id] = NULL;
1172 	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
1173 	if (indx == -EINVAL)
1174 		BUG();
1175 
1176 	eq = &dev->eq_tbl[indx];
1177 	irq = ocrdma_get_irq(dev, eq);
1178 	synchronize_irq(irq);
1179 	ocrdma_flush_cq(cq);
1180 
1181 	(void)ocrdma_mbx_destroy_cq(dev, cq);
1182 	if (cq->ucontext) {
1183 		pdid = cq->ucontext->cntxt_pd->id;
1184 		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
1185 				PAGE_ALIGN(cq->len));
1186 		ocrdma_del_mmap(cq->ucontext,
1187 				ocrdma_get_db_addr(dev, pdid),
1188 				dev->nic_info.db_page_size);
1189 	}
1190 
1191 	kfree(cq);
1192 	return 0;
1193 }
1194 
1195 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1196 {
1197 	int status = -EINVAL;
1198 
1199 	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
1200 		dev->qp_tbl[qp->id] = qp;
1201 		status = 0;
1202 	}
1203 	return status;
1204 }
1205 
1206 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1207 {
1208 	dev->qp_tbl[qp->id] = NULL;
1209 }
1210 
1211 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
1212 				  struct ib_qp_init_attr *attrs)
1213 {
1214 	if ((attrs->qp_type != IB_QPT_GSI) &&
1215 	    (attrs->qp_type != IB_QPT_RC) &&
1216 	    (attrs->qp_type != IB_QPT_UC) &&
1217 	    (attrs->qp_type != IB_QPT_UD)) {
1218 		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
1219 		       __func__, dev->id, attrs->qp_type);
1220 		return -EINVAL;
1221 	}
1222 	/* Skip the check for QP1 to support CM size of 128 */
1223 	if ((attrs->qp_type != IB_QPT_GSI) &&
1224 	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
1225 		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
1226 		       __func__, dev->id, attrs->cap.max_send_wr);
1227 		pr_err("%s(%d) supported send_wr=0x%x\n",
1228 		       __func__, dev->id, dev->attr.max_wqe);
1229 		return -EINVAL;
1230 	}
1231 	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
1232 		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
1233 		       __func__, dev->id, attrs->cap.max_recv_wr);
1234 		pr_err("%s(%d) supported recv_wr=0x%x\n",
1235 		       __func__, dev->id, dev->attr.max_rqe);
1236 		return -EINVAL;
1237 	}
1238 	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
1239 		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
1240 		       __func__, dev->id, attrs->cap.max_inline_data);
1241 		pr_err("%s(%d) supported inline data size=0x%x\n",
1242 		       __func__, dev->id, dev->attr.max_inline_data);
1243 		return -EINVAL;
1244 	}
1245 	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
1246 		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
1247 		       __func__, dev->id, attrs->cap.max_send_sge);
1248 		pr_err("%s(%d) supported send_sge=0x%x\n",
1249 		       __func__, dev->id, dev->attr.max_send_sge);
1250 		return -EINVAL;
1251 	}
1252 	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
1253 		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
1254 		       __func__, dev->id, attrs->cap.max_recv_sge);
1255 		pr_err("%s(%d) supported recv_sge=0x%x\n",
1256 		       __func__, dev->id, dev->attr.max_recv_sge);
1257 		return -EINVAL;
1258 	}
1259 	/* unprivileged user space cannot create special QP */
1260 	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1261 		pr_err
1262 		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
1263 		     __func__, dev->id, attrs->qp_type);
1264 		return -EINVAL;
1265 	}
1266 	/* allow creating only one GSI type of QP */
1267 	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
1268 		pr_err("%s(%d) GSI special QPs already created.\n",
1269 		       __func__, dev->id);
1270 		return -EINVAL;
1271 	}
1272 	/* verify consumer QPs are not trying to use GSI QP's CQ */
1273 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
1274 		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
1275 			(dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
1276 			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
1277 				__func__, dev->id);
1278 			return -EINVAL;
1279 		}
1280 	}
1281 	return 0;
1282 }
1283 
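/* Fill the create-QP response for user space and register the SQ/RQ
 * buffers so the library can mmap them.
 */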
1284 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1285 				struct ib_udata *udata, int dpp_offset,
1286 				int dpp_credit_lmt, int srq)
1287 {
1288 	int status = 0;
1289 	u64 usr_db;
1290 	struct ocrdma_create_qp_uresp uresp;
1291 	struct ocrdma_pd *pd = qp->pd;
1292 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1293 
1294 	memset(&uresp, 0, sizeof(uresp));
1295 	usr_db = dev->nic_info.unmapped_db +
1296 			(pd->id * dev->nic_info.db_page_size);
1297 	uresp.qp_id = qp->id;
1298 	uresp.sq_dbid = qp->sq.dbid;
1299 	uresp.num_sq_pages = 1;
1300 	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
1301 	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
1302 	uresp.num_wqe_allocated = qp->sq.max_cnt;
1303 	if (!srq) {
1304 		uresp.rq_dbid = qp->rq.dbid;
1305 		uresp.num_rq_pages = 1;
1306 		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
1307 		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
1308 		uresp.num_rqe_allocated = qp->rq.max_cnt;
1309 	}
1310 	uresp.db_page_addr = usr_db;
1311 	uresp.db_page_size = dev->nic_info.db_page_size;
1312 	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
1313 	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1314 	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
1315 
1316 	if (qp->dpp_enabled) {
1317 		uresp.dpp_credit = dpp_credit_lmt;
1318 		uresp.dpp_offset = dpp_offset;
1319 	}
1320 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1321 	if (status) {
1322 		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
1323 		goto err;
1324 	}
1325 	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
1326 				 uresp.sq_page_size);
1327 	if (status)
1328 		goto err;
1329 
1330 	if (!srq) {
1331 		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
1332 					 uresp.rq_page_size);
1333 		if (status)
1334 			goto rq_map_err;
1335 	}
1336 	return status;
1337 rq_map_err:
1338 	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
1339 err:
1340 	return status;
1341 }
1342 
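/* Compute the SQ/RQ doorbell addresses for this PD; the doorbell
 * offsets differ between SKH-R and earlier ASIC generations.
 */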
1343 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
1344 			     struct ocrdma_pd *pd)
1345 {
1346 	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1347 		qp->sq_db = dev->nic_info.db +
1348 			(pd->id * dev->nic_info.db_page_size) +
1349 			OCRDMA_DB_GEN2_SQ_OFFSET;
1350 		qp->rq_db = dev->nic_info.db +
1351 			(pd->id * dev->nic_info.db_page_size) +
1352 			OCRDMA_DB_GEN2_RQ_OFFSET;
1353 	} else {
1354 		qp->sq_db = dev->nic_info.db +
1355 			(pd->id * dev->nic_info.db_page_size) +
1356 			OCRDMA_DB_SQ_OFFSET;
1357 		qp->rq_db = dev->nic_info.db +
1358 			(pd->id * dev->nic_info.db_page_size) +
1359 			OCRDMA_DB_RQ_OFFSET;
1360 	}
1361 }
1362 
1363 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
1364 {
1365 	qp->wqe_wr_id_tbl =
1366 	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
1367 		    GFP_KERNEL);
1368 	if (qp->wqe_wr_id_tbl == NULL)
1369 		return -ENOMEM;
1370 	qp->rqe_wr_id_tbl =
1371 	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
1372 	if (qp->rqe_wr_id_tbl == NULL)
1373 		return -ENOMEM;
1374 
1375 	return 0;
1376 }
1377 
1378 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1379 				      struct ocrdma_pd *pd,
1380 				      struct ib_qp_init_attr *attrs)
1381 {
1382 	qp->pd = pd;
1383 	spin_lock_init(&qp->q_lock);
1384 	INIT_LIST_HEAD(&qp->sq_entry);
1385 	INIT_LIST_HEAD(&qp->rq_entry);
1386 
1387 	qp->qp_type = attrs->qp_type;
1388 	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
1389 	qp->max_inline_data = attrs->cap.max_inline_data;
1390 	qp->sq.max_sges = attrs->cap.max_send_sge;
1391 	qp->rq.max_sges = attrs->cap.max_recv_sge;
1392 	qp->state = OCRDMA_QPS_RST;
1393 	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1394 }
1395 
1396 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1397 				   struct ib_qp_init_attr *attrs)
1398 {
1399 	if (attrs->qp_type == IB_QPT_GSI) {
1400 		dev->gsi_qp_created = 1;
1401 		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
1402 		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
1403 	}
1404 }
1405 
1406 struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1407 			       struct ib_qp_init_attr *attrs,
1408 			       struct ib_udata *udata)
1409 {
1410 	int status;
1411 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1412 	struct ocrdma_qp *qp;
1413 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1414 	struct ocrdma_create_qp_ureq ureq;
1415 	u16 dpp_credit_lmt, dpp_offset;
1416 
1417 	status = ocrdma_check_qp_params(ibpd, dev, attrs);
1418 	if (status)
1419 		goto gen_err;
1420 
1421 	memset(&ureq, 0, sizeof(ureq));
1422 	if (udata) {
1423 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1424 			return ERR_PTR(-EFAULT);
1425 	}
1426 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1427 	if (!qp) {
1428 		status = -ENOMEM;
1429 		goto gen_err;
1430 	}
1431 	ocrdma_set_qp_init_params(qp, pd, attrs);
1432 	if (udata == NULL)
1433 		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1434 					OCRDMA_QP_FAST_REG);
1435 
1436 	mutex_lock(&dev->dev_lock);
1437 	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1438 					ureq.dpp_cq_id,
1439 					&dpp_offset, &dpp_credit_lmt);
1440 	if (status)
1441 		goto mbx_err;
1442 
1443 	/* user space QPs' wr_id tables are managed in the library */
1444 	if (udata == NULL) {
1445 		status = ocrdma_alloc_wr_id_tbl(qp);
1446 		if (status)
1447 			goto map_err;
1448 	}
1449 
1450 	status = ocrdma_add_qpn_map(dev, qp);
1451 	if (status)
1452 		goto map_err;
1453 	ocrdma_set_qp_db(dev, qp, pd);
1454 	if (udata) {
1455 		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1456 					      dpp_credit_lmt,
1457 					      (attrs->srq != NULL));
1458 		if (status)
1459 			goto cpy_err;
1460 	}
1461 	ocrdma_store_gsi_qp_cq(dev, attrs);
1462 	qp->ibqp.qp_num = qp->id;
1463 	mutex_unlock(&dev->dev_lock);
1464 	return &qp->ibqp;
1465 
1466 cpy_err:
1467 	ocrdma_del_qpn_map(dev, qp);
1468 map_err:
1469 	ocrdma_mbx_destroy_qp(dev, qp);
1470 mbx_err:
1471 	mutex_unlock(&dev->dev_lock);
1472 	kfree(qp->wqe_wr_id_tbl);
1473 	kfree(qp->rqe_wr_id_tbl);
1474 	kfree(qp);
1475 	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1476 gen_err:
1477 	return ERR_PTR(status);
1478 }
1479 
1480 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1481 		      int attr_mask)
1482 {
1483 	int status = 0;
1484 	struct ocrdma_qp *qp;
1485 	struct ocrdma_dev *dev;
1486 	enum ib_qp_state old_qps;
1487 
1488 	qp = get_ocrdma_qp(ibqp);
1489 	dev = get_ocrdma_dev(ibqp->device);
1490 	if (attr_mask & IB_QP_STATE)
1491 		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1492 	/* if the new and previous states are the same, the hw doesn't need
1493 	 * to know about it.
1494 	 */
1495 	if (status < 0)
1496 		return status;
1497 	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
1498 
1499 	return status;
1500 }
1501 
1502 int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1503 		     int attr_mask, struct ib_udata *udata)
1504 {
1505 	unsigned long flags;
1506 	int status = -EINVAL;
1507 	struct ocrdma_qp *qp;
1508 	struct ocrdma_dev *dev;
1509 	enum ib_qp_state old_qps, new_qps;
1510 
1511 	qp = get_ocrdma_qp(ibqp);
1512 	dev = get_ocrdma_dev(ibqp->device);
1513 
1514 	/* synchronize with multiple contexts trying to change or retrieve qps */
1515 	mutex_lock(&dev->dev_lock);
1516 	/* synchronize with wqe, rqe posting and cqe processing contexts */
1517 	spin_lock_irqsave(&qp->q_lock, flags);
1518 	old_qps = get_ibqp_state(qp->state);
1519 	if (attr_mask & IB_QP_STATE)
1520 		new_qps = attr->qp_state;
1521 	else
1522 		new_qps = old_qps;
1523 	spin_unlock_irqrestore(&qp->q_lock, flags);
1524 
1525 	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
1526 				IB_LINK_LAYER_ETHERNET)) {
1527 		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1528 		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1529 		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1530 		       old_qps, new_qps);
1531 		goto param_err;
1532 	}
1533 
1534 	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1535 	if (status > 0)
1536 		status = 0;
1537 param_err:
1538 	mutex_unlock(&dev->dev_lock);
1539 	return status;
1540 }
1541 
1542 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1543 {
1544 	switch (mtu) {
1545 	case 256:
1546 		return IB_MTU_256;
1547 	case 512:
1548 		return IB_MTU_512;
1549 	case 1024:
1550 		return IB_MTU_1024;
1551 	case 2048:
1552 		return IB_MTU_2048;
1553 	case 4096:
1554 		return IB_MTU_4096;
1555 	default:
1556 		return IB_MTU_1024;
1557 	}
1558 }
1559 
1560 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1561 {
1562 	int ib_qp_acc_flags = 0;
1563 
1564 	if (qp_cap_flags & OCRDMA_QP_INB_WR)
1565 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1566 	if (qp_cap_flags & OCRDMA_QP_INB_RD)
1567 		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1568 	return ib_qp_acc_flags;
1569 }
1570 
1571 int ocrdma_query_qp(struct ib_qp *ibqp,
1572 		    struct ib_qp_attr *qp_attr,
1573 		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1574 {
1575 	int status;
1576 	u32 qp_state;
1577 	struct ocrdma_qp_params params;
1578 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1579 	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1580 
1581 	memset(&params, 0, sizeof(params));
1582 	mutex_lock(&dev->dev_lock);
1583 	status = ocrdma_mbx_query_qp(dev, qp, &params);
1584 	mutex_unlock(&dev->dev_lock);
1585 	if (status)
1586 		goto mbx_err;
1587 	if (qp->qp_type == IB_QPT_UD)
1588 		qp_attr->qkey = params.qkey;
1589 	qp_attr->path_mtu =
1590 		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1591 				OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1592 				OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1593 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
1594 	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1595 	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1596 	qp_attr->dest_qp_num =
1597 	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1598 
1599 	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1600 	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1601 	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1602 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
1603 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1604 	qp_attr->cap.max_inline_data = qp->max_inline_data;
1605 	qp_init_attr->cap = qp_attr->cap;
1606 	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1607 	       sizeof(params.dgid));
1608 	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1609 	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1610 	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1611 	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1612 					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1613 						OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1614 	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1615 					      OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1616 						OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1617 
1618 	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1619 	qp_attr->ah_attr.port_num = 1;
1620 	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1621 			       OCRDMA_QP_PARAMS_SL_MASK) >>
1622 				OCRDMA_QP_PARAMS_SL_SHIFT;
1623 	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1624 			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1625 				OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1626 	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1627 			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1628 				OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1629 	qp_attr->retry_cnt =
1630 	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1631 		OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1632 	qp_attr->min_rnr_timer = 0;
1633 	qp_attr->pkey_index = 0;
1634 	qp_attr->port_num = 1;
1635 	qp_attr->ah_attr.src_path_bits = 0;
1636 	qp_attr->ah_attr.static_rate = 0;
1637 	qp_attr->alt_pkey_index = 0;
1638 	qp_attr->alt_port_num = 0;
1639 	qp_attr->alt_timeout = 0;
1640 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1641 	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1642 		    OCRDMA_QP_PARAMS_STATE_SHIFT;
1643 	qp_attr->qp_state = get_ibqp_state(qp_state);
1644 	qp_attr->cur_qp_state = qp_attr->qp_state;
1645 	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1646 	qp_attr->max_dest_rd_atomic =
1647 	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1648 	qp_attr->max_rd_atomic =
1649 	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1650 	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1651 				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1652 	/* Sync driver QP state with FW */
1653 	ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1654 mbx_err:
1655 	return status;
1656 }
1657 
1658 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1659 {
1660 	unsigned int i = idx / 32;
1661 	u32 mask = (1U << (idx % 32));
1662 
1663 	srq->idx_bit_fields[i] ^= mask;
1664 }
1665 
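/* Helpers for the circular hardware work queues: free-slot accounting,
 * empty checks and head/tail advancement.
 */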
1666 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1667 {
1668 	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1669 }
1670 
1671 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1672 {
1673 	return (qp->sq.tail == qp->sq.head);
1674 }
1675 
1676 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1677 {
1678 	return (qp->rq.tail == qp->rq.head);
1679 }
1680 
1681 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1682 {
1683 	return q->va + (q->head * q->entry_size);
1684 }
1685 
1686 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1687 				      u32 idx)
1688 {
1689 	return q->va + (idx * q->entry_size);
1690 }
1691 
1692 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1693 {
1694 	q->head = (q->head + 1) & q->max_wqe_idx;
1695 }
1696 
1697 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1698 {
1699 	q->tail = (q->tail + 1) & q->max_wqe_idx;
1700 }
1701 
1702 /* discard the cqe for a given QP */
1703 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1704 {
1705 	unsigned long cq_flags;
1706 	unsigned long flags;
1707 	int discard_cnt = 0;
1708 	u32 cur_getp, stop_getp;
1709 	struct ocrdma_cqe *cqe;
1710 	u32 qpn = 0, wqe_idx = 0;
1711 
1712 	spin_lock_irqsave(&cq->cq_lock, cq_flags);
1713 
1714 	/* traverse through the CQEs in the hw CQ,
1715 	 * find the matching CQE for a given qp,
1716 	 * mark the matching one discarded by clearing qpn.
1717 	 * ring the doorbell in the poll_cq() as
1718 	 * we don't complete out of order cqe.
1719 	 */
1720 
1721 	cur_getp = cq->getp;
1722 	/* find up to where we reap the cq. */
1723 	stop_getp = cur_getp;
1724 	do {
1725 		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1726 			break;
1727 
1728 		cqe = cq->va + cur_getp;
1729 		/* if (a) we are done reaping the whole hw cq, or
1730 		 *    (b) the qp's work queues become empty,
1731 		 * then exit.
1732 		 */
1733 		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1734 		/* if a previously discarded cqe is found, skip it too. */
1735 		/* check for matching qp */
1736 		if (qpn == 0 || qpn != qp->id)
1737 			goto skip_cqe;
1738 
1739 		if (is_cqe_for_sq(cqe)) {
1740 			ocrdma_hwq_inc_tail(&qp->sq);
1741 		} else {
1742 			if (qp->srq) {
1743 				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1744 					OCRDMA_CQE_BUFTAG_SHIFT) &
1745 					qp->srq->rq.max_wqe_idx;
1746 				if (wqe_idx < 1)
1747 					BUG();
1748 				spin_lock_irqsave(&qp->srq->q_lock, flags);
1749 				ocrdma_hwq_inc_tail(&qp->srq->rq);
1750 				ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1751 				spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1752 
1753 			} else {
1754 				ocrdma_hwq_inc_tail(&qp->rq);
1755 			}
1756 		}
1757 		/* mark cqe discarded so that it is not picked up later
1758 		 * in the poll_cq().
1759 		 */
1760 		discard_cnt += 1;
1761 		cqe->cmn.qpn = 0;
1762 skip_cqe:
1763 		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1764 	} while (cur_getp != stop_getp);
1765 	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1766 }
1767 
1768 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1769 {
1770 	int found = false;
1771 	unsigned long flags;
1772 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1773 	/* sync with any active CQ poll */
1774 
1775 	spin_lock_irqsave(&dev->flush_q_lock, flags);
1776 	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1777 	if (found)
1778 		list_del(&qp->sq_entry);
1779 	if (!qp->srq) {
1780 		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1781 		if (found)
1782 			list_del(&qp->rq_entry);
1783 	}
1784 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1785 }
1786 
1787 int ocrdma_destroy_qp(struct ib_qp *ibqp)
1788 {
1789 	struct ocrdma_pd *pd;
1790 	struct ocrdma_qp *qp;
1791 	struct ocrdma_dev *dev;
1792 	struct ib_qp_attr attrs;
1793 	int attr_mask;
1794 	unsigned long flags;
1795 
1796 	qp = get_ocrdma_qp(ibqp);
1797 	dev = get_ocrdma_dev(ibqp->device);
1798 
1799 	pd = qp->pd;
1800 
1801 	/* change the QP state to ERROR */
1802 	if (qp->state != OCRDMA_QPS_RST) {
1803 		attrs.qp_state = IB_QPS_ERR;
1804 		attr_mask = IB_QP_STATE;
1805 		_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1806 	}
1807 	/* ensure that CQEs for a newly created QP (whose id may be the same
1808 	 * as the one just being destroyed) don't get
1809 	 * discarded until the old CQEs are discarded.
1810 	 */
1811 	mutex_lock(&dev->dev_lock);
1812 	(void) ocrdma_mbx_destroy_qp(dev, qp);
1813 
1814 	/*
1815 	 * acquire CQ lock while destroy is in progress, in order to
1816 	 * protect against processing in-flight CQEs for this QP.
1817 	 */
1818 	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1819 	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1820 		spin_lock(&qp->rq_cq->cq_lock);
1821 
1822 	ocrdma_del_qpn_map(dev, qp);
1823 
1824 	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1825 		spin_unlock(&qp->rq_cq->cq_lock);
1826 	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1827 
1828 	if (!pd->uctx) {
1829 		ocrdma_discard_cqes(qp, qp->sq_cq);
1830 		ocrdma_discard_cqes(qp, qp->rq_cq);
1831 	}
1832 	mutex_unlock(&dev->dev_lock);
1833 
1834 	if (pd->uctx) {
1835 		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1836 				PAGE_ALIGN(qp->sq.len));
1837 		if (!qp->srq)
1838 			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1839 					PAGE_ALIGN(qp->rq.len));
1840 	}
1841 
1842 	ocrdma_del_flush_qp(qp);
1843 
1844 	kfree(qp->wqe_wr_id_tbl);
1845 	kfree(qp->rqe_wr_id_tbl);
1846 	kfree(qp);
1847 	return 0;
1848 }
1849 
1850 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1851 				struct ib_udata *udata)
1852 {
1853 	int status;
1854 	struct ocrdma_create_srq_uresp uresp;
1855 
1856 	memset(&uresp, 0, sizeof(uresp));
1857 	uresp.rq_dbid = srq->rq.dbid;
1858 	uresp.num_rq_pages = 1;
1859 	uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1860 	uresp.rq_page_size = srq->rq.len;
1861 	uresp.db_page_addr = dev->nic_info.unmapped_db +
1862 	    (srq->pd->id * dev->nic_info.db_page_size);
1863 	uresp.db_page_size = dev->nic_info.db_page_size;
1864 	uresp.num_rqe_allocated = srq->rq.max_cnt;
1865 	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1866 		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1867 		uresp.db_shift = 24;
1868 	} else {
1869 		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1870 		uresp.db_shift = 16;
1871 	}
1872 
1873 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1874 	if (status)
1875 		return status;
1876 	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1877 				 uresp.rq_page_size);
1878 	return status;
1881 }
1882 
1883 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1884 				 struct ib_srq_init_attr *init_attr,
1885 				 struct ib_udata *udata)
1886 {
1887 	int status = -ENOMEM;
1888 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1889 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1890 	struct ocrdma_srq *srq;
1891 
1892 	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1893 		return ERR_PTR(-EINVAL);
1894 	if (init_attr->attr.max_wr > dev->attr.max_rqe)
1895 		return ERR_PTR(-EINVAL);
1896 
1897 	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1898 	if (!srq)
1899 		return ERR_PTR(status);
1900 
1901 	spin_lock_init(&srq->q_lock);
1902 	srq->pd = pd;
1903 	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1904 	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1905 	if (status)
1906 		goto err;
1907 
1908 	if (udata == NULL) {
1909 		srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1910 			    GFP_KERNEL);
1911 		if (srq->rqe_wr_id_tbl == NULL)
1912 			goto arm_err;
1913 
1914 		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1915 		    (srq->rq.max_cnt % 32 ? 1 : 0);
1916 		srq->idx_bit_fields =
1917 		    kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
1918 		if (srq->idx_bit_fields == NULL)
1919 			goto arm_err;
1920 		memset(srq->idx_bit_fields, 0xff,
1921 		       srq->bit_fields_len * sizeof(u32));
1922 	}
1923 
1924 	if (init_attr->attr.srq_limit) {
1925 		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1926 		if (status)
1927 			goto arm_err;
1928 	}
1929 
1930 	if (udata) {
1931 		status = ocrdma_copy_srq_uresp(dev, srq, udata);
1932 		if (status)
1933 			goto arm_err;
1934 	}
1935 
1936 	return &srq->ibsrq;
1937 
1938 arm_err:
1939 	ocrdma_mbx_destroy_srq(dev, srq);
1940 err:
1941 	kfree(srq->rqe_wr_id_tbl);
1942 	kfree(srq->idx_bit_fields);
1943 	kfree(srq);
1944 	return ERR_PTR(status);
1945 }
1946 
1947 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1948 		      struct ib_srq_attr *srq_attr,
1949 		      enum ib_srq_attr_mask srq_attr_mask,
1950 		      struct ib_udata *udata)
1951 {
1952 	int status = 0;
1953 	struct ocrdma_srq *srq;
1954 
1955 	srq = get_ocrdma_srq(ibsrq);
1956 	if (srq_attr_mask & IB_SRQ_MAX_WR)
1957 		status = -EINVAL;
1958 	else
1959 		status = ocrdma_mbx_modify_srq(srq, srq_attr);
1960 	return status;
1961 }
1962 
1963 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1964 {
1965 	int status;
1966 	struct ocrdma_srq *srq;
1967 
1968 	srq = get_ocrdma_srq(ibsrq);
1969 	status = ocrdma_mbx_query_srq(srq, srq_attr);
1970 	return status;
1971 }
1972 
1973 int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1974 {
1975 	int status;
1976 	struct ocrdma_srq *srq;
1977 	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1978 
1979 	srq = get_ocrdma_srq(ibsrq);
1980 
1981 	status = ocrdma_mbx_destroy_srq(dev, srq);
1982 
1983 	if (srq->pd->uctx)
1984 		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1985 				PAGE_ALIGN(srq->rq.len));
1986 
1987 	kfree(srq->idx_bit_fields);
1988 	kfree(srq->rqe_wr_id_tbl);
1989 	kfree(srq);
1990 	return status;
1991 }
1992 
1993 /* unprivileged verbs and their support functions. */
1994 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1995 				struct ocrdma_hdr_wqe *hdr,
1996 				struct ib_send_wr *wr)
1997 {
1998 	struct ocrdma_ewqe_ud_hdr *ud_hdr =
1999 		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
2000 	struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
2001 
2002 	ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
2003 	if (qp->qp_type == IB_QPT_GSI)
2004 		ud_hdr->qkey = qp->qkey;
2005 	else
2006 		ud_hdr->qkey = wr->wr.ud.remote_qkey;
2007 	ud_hdr->rsvd_ahid = ah->id;
2008 	if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
2009 		hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
2010 }
2011 
2012 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
2013 			      struct ocrdma_sge *sge, int num_sge,
2014 			      struct ib_sge *sg_list)
2015 {
2016 	int i;
2017 
2018 	for (i = 0; i < num_sge; i++) {
2019 		sge[i].lrkey = sg_list[i].lkey;
2020 		sge[i].addr_lo = sg_list[i].addr;
2021 		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
2022 		sge[i].len = sg_list[i].length;
2023 		hdr->total_len += sg_list[i].length;
2024 	}
2025 	if (num_sge == 0)
2026 		memset(sge, 0, sizeof(*sge));
2027 }
2028 
2029 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
2030 {
2031 	uint32_t total_len = 0, i;
2032 
2033 	for (i = 0; i < num_sge; i++)
2034 		total_len += sg_list[i].length;
2035 	return total_len;
2036 }
2037 
2038 
2039 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
2040 				    struct ocrdma_hdr_wqe *hdr,
2041 				    struct ocrdma_sge *sge,
2042 				    struct ib_send_wr *wr, u32 wqe_size)
2043 {
2044 	int i;
2045 	char *dpp_addr;
2046 
2047 	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
2048 		hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
2049 		if (unlikely(hdr->total_len > qp->max_inline_data)) {
2050 			pr_err("%s() supported_len=0x%x,\n"
2051 			       " unsupported len req=0x%x\n", __func__,
2052 				qp->max_inline_data, hdr->total_len);
2053 			return -EINVAL;
2054 		}
2055 		dpp_addr = (char *)sge;
2056 		for (i = 0; i < wr->num_sge; i++) {
2057 			memcpy(dpp_addr,
2058 			       (void *)(unsigned long)wr->sg_list[i].addr,
2059 			       wr->sg_list[i].length);
2060 			dpp_addr += wr->sg_list[i].length;
2061 		}
2062 
2063 		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
2064 		if (hdr->total_len == 0)
2065 			wqe_size += sizeof(struct ocrdma_sge);
2066 		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
2067 	} else {
2068 		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2069 		if (wr->num_sge)
2070 			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
2071 		else
2072 			wqe_size += sizeof(struct ocrdma_sge);
2073 		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2074 	}
2075 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2076 	return 0;
2077 }
2078 
2079 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2080 			     struct ib_send_wr *wr)
2081 {
2082 	int status;
2083 	struct ocrdma_sge *sge;
2084 	u32 wqe_size = sizeof(*hdr);
2085 
2086 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2087 		ocrdma_build_ud_hdr(qp, hdr, wr);
2088 		sge = (struct ocrdma_sge *)(hdr + 2);
2089 		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
2090 	} else {
2091 		sge = (struct ocrdma_sge *)(hdr + 1);
2092 	}
2093 
2094 	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2095 	return status;
2096 }
2097 
2098 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2099 			      struct ib_send_wr *wr)
2100 {
2101 	int status;
2102 	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2103 	struct ocrdma_sge *sge = ext_rw + 1;
2104 	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2105 
2106 	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2107 	if (status)
2108 		return status;
2109 	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2110 	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2111 	ext_rw->lrkey = wr->wr.rdma.rkey;
2112 	ext_rw->len = hdr->total_len;
2113 	return 0;
2114 }
2115 
2116 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2117 			      struct ib_send_wr *wr)
2118 {
2119 	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2120 	struct ocrdma_sge *sge = ext_rw + 1;
2121 	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2122 	    sizeof(struct ocrdma_hdr_wqe);
2123 
2124 	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2125 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2126 	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2127 	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2128 
2129 	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2130 	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2131 	ext_rw->lrkey = wr->wr.rdma.rkey;
2132 	ext_rw->len = hdr->total_len;
2133 }
2134 
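/* Walk the fast-reg page list and program one hw pbe per page into the
 * pbl table, spilling into the next pbl when the current one is full.
 */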
2135 static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
2136 			    struct ocrdma_hw_mr *hwmr)
2137 {
2138 	int i;
2139 	u64 buf_addr = 0;
2140 	int num_pbes;
2141 	struct ocrdma_pbe *pbe;
2142 
2143 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2144 	num_pbes = 0;
2145 
2146 	/* go through the OS phy regions & fill hw pbe entries into pbls. */
2147 	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
2148 		/* one OS buf can need more than one pbe when
2149 		 * buffers are of different sizes;
2150 		 * split the ib_buf into one or more pbes.
2151 		 */
2152 		buf_addr = wr->wr.fast_reg.page_list->page_list[i];
2153 		pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2154 		pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2155 		num_pbes += 1;
2156 		pbe++;
2157 
2158 		/* if the pbl is full of pbes,
2159 		 * move to the next pbl.
2160 		 */
2161 		if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
2162 			pbl_tbl++;
2163 			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2164 		}
2165 	}
2166 	return;
2167 }
2168 
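/* Encode a page size as the number of left shifts from 4K;
 * e.g. 4096 -> 0, 8192 -> 1, ..., 256M (4096 << 16) -> 16.
 */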
2169 static int get_encoded_page_size(int pg_sz)
2170 {
2171 	/* Max size is 256M (4096 << 16) */
2172 	int i = 0;
2173 	for (; i < 17; i++)
2174 		if (pg_sz == (4096 << i))
2175 			break;
2176 	return i;
2177 }
2178 
2179 
2180 static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2181 			   struct ib_send_wr *wr)
2182 {
2183 	u64 fbo;
2184 	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2185 	struct ocrdma_mr *mr;
2186 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2187 	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2188 
2189 	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2190 
2191 	if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
2192 		return -EINVAL;
2193 
2194 	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2195 	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2196 
2197 	if (wr->wr.fast_reg.page_list_len == 0)
2198 		BUG();
2199 	if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
2200 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2201 	if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
2202 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2203 	if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
2204 		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2205 	hdr->lkey = wr->wr.fast_reg.rkey;
2206 	hdr->total_len = wr->wr.fast_reg.length;
2207 
2208 	fbo = wr->wr.fast_reg.iova_start -
2209 	    (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
2210 
2211 	fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
2212 	fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
2213 	fast_reg->fbo_hi = upper_32_bits(fbo);
2214 	fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2215 	fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
2216 	fast_reg->size_sge =
2217 		get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
2218 	mr = (struct ocrdma_mr *) (unsigned long)
2219 		dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
2220 	build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2221 	return 0;
2222 }
2223 
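/* Ring the SQ doorbell; the low bits carry the queue id and, presumably,
 * the field at OCRDMA_DB_SQ_SHIFT carries the number of WQEs just posted
 * (one per call here).
 */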
2224 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2225 {
2226 	u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2227 
2228 	iowrite32(val, qp->sq_db);
2229 }
2230 
2231 int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2232 		     struct ib_send_wr **bad_wr)
2233 {
2234 	int status = 0;
2235 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2236 	struct ocrdma_hdr_wqe *hdr;
2237 	unsigned long flags;
2238 
2239 	spin_lock_irqsave(&qp->q_lock, flags);
2240 	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2241 		spin_unlock_irqrestore(&qp->q_lock, flags);
2242 		*bad_wr = wr;
2243 		return -EINVAL;
2244 	}
2245 
2246 	while (wr) {
2247 		if (qp->qp_type == IB_QPT_UD &&
2248 		    (wr->opcode != IB_WR_SEND &&
2249 		     wr->opcode != IB_WR_SEND_WITH_IMM)) {
2250 			*bad_wr = wr;
2251 			status = -EINVAL;
2252 			break;
2253 		}
2254 		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2255 		    wr->num_sge > qp->sq.max_sges) {
2256 			*bad_wr = wr;
2257 			status = -ENOMEM;
2258 			break;
2259 		}
2260 		hdr = ocrdma_hwq_head(&qp->sq);
2261 		hdr->cw = 0;
2262 		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2263 			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2264 		if (wr->send_flags & IB_SEND_FENCE)
2265 			hdr->cw |=
2266 			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2267 		if (wr->send_flags & IB_SEND_SOLICITED)
2268 			hdr->cw |=
2269 			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2270 		hdr->total_len = 0;
2271 		switch (wr->opcode) {
2272 		case IB_WR_SEND_WITH_IMM:
2273 			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2274 			hdr->immdt = ntohl(wr->ex.imm_data);
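			/* fall through */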
2275 		case IB_WR_SEND:
2276 			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2277 			status = ocrdma_build_send(qp, hdr, wr);
2278 			break;
2279 		case IB_WR_SEND_WITH_INV:
2280 			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2281 			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2282 			hdr->lkey = wr->ex.invalidate_rkey;
2283 			status = ocrdma_build_send(qp, hdr, wr);
2284 			break;
2285 		case IB_WR_RDMA_WRITE_WITH_IMM:
2286 			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2287 			hdr->immdt = ntohl(wr->ex.imm_data);
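			/* fall through */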
2288 		case IB_WR_RDMA_WRITE:
2289 			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2290 			status = ocrdma_build_write(qp, hdr, wr);
2291 			break;
2292 		case IB_WR_RDMA_READ:
2293 			ocrdma_build_read(qp, hdr, wr);
2294 			break;
2295 		case IB_WR_LOCAL_INV:
2296 			hdr->cw |=
2297 			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2298 			hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2299 					sizeof(struct ocrdma_sge)) /
2300 				OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2301 			hdr->lkey = wr->ex.invalidate_rkey;
2302 			break;
2303 		case IB_WR_FAST_REG_MR:
2304 			status = ocrdma_build_fr(qp, hdr, wr);
2305 			break;
2306 		default:
2307 			status = -EINVAL;
2308 			break;
2309 		}
2310 		if (status) {
2311 			*bad_wr = wr;
2312 			break;
2313 		}
2314 		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2315 			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2316 		else
2317 			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2318 		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2319 		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2320 				   OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2321 		/* make sure wqe is written before adapter can access it */
2322 		wmb();
2323 		/* inform hw to start processing it */
2324 		ocrdma_ring_sq_db(qp);
2325 
2326 		/* update pointer, counter for next wr */
2327 		ocrdma_hwq_inc_head(&qp->sq);
2328 		wr = wr->next;
2329 	}
2330 	spin_unlock_irqrestore(&qp->q_lock, flags);
2331 	return status;
2332 }
2333 
2334 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2335 {
2336 	u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2337 
2338 	iowrite32(val, qp->rq_db);
2339 }
2340 
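/* Build a receive WQE from the posted sg list; tag is 0 for a QP RQ and
 * the shadow-table index for an SRQ, and is echoed back in the CQE.
 */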
2341 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2342 			     u16 tag)
2343 {
2344 	u32 wqe_size = 0;
2345 	struct ocrdma_sge *sge;
2346 	if (wr->num_sge)
2347 		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2348 	else
2349 		wqe_size = sizeof(*sge) + sizeof(*rqe);
2350 
2351 	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2352 				OCRDMA_WQE_SIZE_SHIFT);
2353 	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2354 	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2355 	rqe->total_len = 0;
2356 	rqe->rsvd_tag = tag;
2357 	sge = (struct ocrdma_sge *)(rqe + 1);
2358 	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2359 	ocrdma_cpu_to_le32(rqe, wqe_size);
2360 }
2361 
2362 int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2363 		     struct ib_recv_wr **bad_wr)
2364 {
2365 	int status = 0;
2366 	unsigned long flags;
2367 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2368 	struct ocrdma_hdr_wqe *rqe;
2369 
2370 	spin_lock_irqsave(&qp->q_lock, flags);
2371 	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2372 		spin_unlock_irqrestore(&qp->q_lock, flags);
2373 		*bad_wr = wr;
2374 		return -EINVAL;
2375 	}
2376 	while (wr) {
2377 		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2378 		    wr->num_sge > qp->rq.max_sges) {
2379 			*bad_wr = wr;
2380 			status = -ENOMEM;
2381 			break;
2382 		}
2383 		rqe = ocrdma_hwq_head(&qp->rq);
2384 		ocrdma_build_rqe(rqe, wr, 0);
2385 
2386 		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2387 		/* make sure rqe is written before adapter can access it */
2388 		wmb();
2389 
2390 		/* inform hw to start processing it */
2391 		ocrdma_ring_rq_db(qp);
2392 
2393 		/* update pointer, counter for next wr */
2394 		ocrdma_hwq_inc_head(&qp->rq);
2395 		wr = wr->next;
2396 	}
2397 	spin_unlock_irqrestore(&qp->q_lock, flags);
2398 	return status;
2399 }
2400 
2401 /* cqes for an srq's rqes can potentially arrive out of order.
2402  * index gives the entry in the shadow table in which to store
2403  * the wr_id. the tag/index is returned in the cqe to reference back
2404  * to a given rqe.
2405  */
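/* For example, with rq.max_cnt of 64 the bit field spans two u32 words,
 * all initialized to 1s; posting a recv clears the first set bit and uses
 * (bit index + 1) as the tag, and the completion path sets the bit back
 * via ocrdma_srq_toggle_bit(srq, tag - 1).
 */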
2406 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2407 {
2408 	int row = 0;
2409 	int indx = 0;
2410 
2411 	for (row = 0; row < srq->bit_fields_len; row++) {
2412 		if (srq->idx_bit_fields[row]) {
2413 			indx = ffs(srq->idx_bit_fields[row]);
2414 			indx = (row * 32) + (indx - 1);
2415 			if (indx >= srq->rq.max_cnt)
2416 				BUG();
2417 			ocrdma_srq_toggle_bit(srq, indx);
2418 			break;
2419 		}
2420 	}
2421 
2422 	if (row == srq->bit_fields_len)
2423 		BUG();
2424 	return indx + 1; /* Use from index 1 */
2425 }
2426 
2427 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2428 {
2429 	u32 val = srq->rq.dbid | (1 << 16);
2430 
2431 	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2432 }
2433 
2434 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2435 			 struct ib_recv_wr **bad_wr)
2436 {
2437 	int status = 0;
2438 	unsigned long flags;
2439 	struct ocrdma_srq *srq;
2440 	struct ocrdma_hdr_wqe *rqe;
2441 	u16 tag;
2442 
2443 	srq = get_ocrdma_srq(ibsrq);
2444 
2445 	spin_lock_irqsave(&srq->q_lock, flags);
2446 	while (wr) {
2447 		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2448 		    wr->num_sge > srq->rq.max_sges) {
2449 			status = -ENOMEM;
2450 			*bad_wr = wr;
2451 			break;
2452 		}
2453 		tag = ocrdma_srq_get_idx(srq);
2454 		rqe = ocrdma_hwq_head(&srq->rq);
2455 		ocrdma_build_rqe(rqe, wr, tag);
2456 
2457 		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2458 		/* make sure rqe is written before adapter can perform DMA */
2459 		wmb();
2460 		/* inform hw to start processing it */
2461 		ocrdma_ring_srq_db(srq);
2462 		/* update pointer, counter for next wr */
2463 		ocrdma_hwq_inc_head(&srq->rq);
2464 		wr = wr->next;
2465 	}
2466 	spin_unlock_irqrestore(&srq->q_lock, flags);
2467 	return status;
2468 }
2469 
2470 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2471 {
2472 	enum ib_wc_status ibwc_status;
2473 
2474 	switch (status) {
2475 	case OCRDMA_CQE_GENERAL_ERR:
2476 		ibwc_status = IB_WC_GENERAL_ERR;
2477 		break;
2478 	case OCRDMA_CQE_LOC_LEN_ERR:
2479 		ibwc_status = IB_WC_LOC_LEN_ERR;
2480 		break;
2481 	case OCRDMA_CQE_LOC_QP_OP_ERR:
2482 		ibwc_status = IB_WC_LOC_QP_OP_ERR;
2483 		break;
2484 	case OCRDMA_CQE_LOC_EEC_OP_ERR:
2485 		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2486 		break;
2487 	case OCRDMA_CQE_LOC_PROT_ERR:
2488 		ibwc_status = IB_WC_LOC_PROT_ERR;
2489 		break;
2490 	case OCRDMA_CQE_WR_FLUSH_ERR:
2491 		ibwc_status = IB_WC_WR_FLUSH_ERR;
2492 		break;
2493 	case OCRDMA_CQE_MW_BIND_ERR:
2494 		ibwc_status = IB_WC_MW_BIND_ERR;
2495 		break;
2496 	case OCRDMA_CQE_BAD_RESP_ERR:
2497 		ibwc_status = IB_WC_BAD_RESP_ERR;
2498 		break;
2499 	case OCRDMA_CQE_LOC_ACCESS_ERR:
2500 		ibwc_status = IB_WC_LOC_ACCESS_ERR;
2501 		break;
2502 	case OCRDMA_CQE_REM_INV_REQ_ERR:
2503 		ibwc_status = IB_WC_REM_INV_REQ_ERR;
2504 		break;
2505 	case OCRDMA_CQE_REM_ACCESS_ERR:
2506 		ibwc_status = IB_WC_REM_ACCESS_ERR;
2507 		break;
2508 	case OCRDMA_CQE_REM_OP_ERR:
2509 		ibwc_status = IB_WC_REM_OP_ERR;
2510 		break;
2511 	case OCRDMA_CQE_RETRY_EXC_ERR:
2512 		ibwc_status = IB_WC_RETRY_EXC_ERR;
2513 		break;
2514 	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2515 		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2516 		break;
2517 	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2518 		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2519 		break;
2520 	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2521 		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2522 		break;
2523 	case OCRDMA_CQE_REM_ABORT_ERR:
2524 		ibwc_status = IB_WC_REM_ABORT_ERR;
2525 		break;
2526 	case OCRDMA_CQE_INV_EECN_ERR:
2527 		ibwc_status = IB_WC_INV_EECN_ERR;
2528 		break;
2529 	case OCRDMA_CQE_INV_EEC_STATE_ERR:
2530 		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2531 		break;
2532 	case OCRDMA_CQE_FATAL_ERR:
2533 		ibwc_status = IB_WC_FATAL_ERR;
2534 		break;
2535 	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2536 		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2537 		break;
2538 	default:
2539 		ibwc_status = IB_WC_GENERAL_ERR;
2540 		break;
2541 	}
2542 	return ibwc_status;
2543 }
2544 
2545 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2546 		      u32 wqe_idx)
2547 {
2548 	struct ocrdma_hdr_wqe *hdr;
2549 	struct ocrdma_sge *rw;
2550 	int opcode;
2551 
2552 	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2553 
2554 	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2555 	/* Undo the hdr->cw swap */
2556 	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2557 	switch (opcode) {
2558 	case OCRDMA_WRITE:
2559 		ibwc->opcode = IB_WC_RDMA_WRITE;
2560 		break;
2561 	case OCRDMA_READ:
2562 		rw = (struct ocrdma_sge *)(hdr + 1);
2563 		ibwc->opcode = IB_WC_RDMA_READ;
2564 		ibwc->byte_len = rw->len;
2565 		break;
2566 	case OCRDMA_SEND:
2567 		ibwc->opcode = IB_WC_SEND;
2568 		break;
2569 	case OCRDMA_FR_MR:
2570 		ibwc->opcode = IB_WC_FAST_REG_MR;
2571 		break;
2572 	case OCRDMA_LKEY_INV:
2573 		ibwc->opcode = IB_WC_LOCAL_INV;
2574 		break;
2575 	default:
2576 		ibwc->status = IB_WC_GENERAL_ERR;
2577 		pr_err("%s() invalid opcode received = 0x%x\n",
2578 		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2579 		break;
2580 	}
2581 }
2582 
2583 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2584 						struct ocrdma_cqe *cqe)
2585 {
2586 	if (is_cqe_for_sq(cqe)) {
2587 		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2588 				cqe->flags_status_srcqpn) &
2589 					~OCRDMA_CQE_STATUS_MASK);
2590 		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2591 				cqe->flags_status_srcqpn) |
2592 				(OCRDMA_CQE_WR_FLUSH_ERR <<
2593 					OCRDMA_CQE_STATUS_SHIFT));
2594 	} else {
2595 		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2596 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2597 					cqe->flags_status_srcqpn) &
2598 						~OCRDMA_CQE_UD_STATUS_MASK);
2599 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2600 					cqe->flags_status_srcqpn) |
2601 					(OCRDMA_CQE_WR_FLUSH_ERR <<
2602 						OCRDMA_CQE_UD_STATUS_SHIFT));
2603 		} else {
2604 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2605 					cqe->flags_status_srcqpn) &
2606 						~OCRDMA_CQE_STATUS_MASK);
2607 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2608 					cqe->flags_status_srcqpn) |
2609 					(OCRDMA_CQE_WR_FLUSH_ERR <<
2610 						OCRDMA_CQE_STATUS_SHIFT));
2611 		}
2612 	}
2613 }
2614 
2615 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2616 				  struct ocrdma_qp *qp, int status)
2617 {
2618 	bool expand = false;
2619 
2620 	ibwc->byte_len = 0;
2621 	ibwc->qp = &qp->ibqp;
2622 	ibwc->status = ocrdma_to_ibwc_err(status);
2623 
2624 	ocrdma_flush_qp(qp);
2625 	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2626 
2627 	/* if a wqe/rqe is pending for which a cqe needs to be returned,
2628 	 * trigger expanding this cqe into flush completions.
2629 	 */
2630 	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2631 		expand = true;
2632 		ocrdma_set_cqe_status_flushed(qp, cqe);
2633 	}
2634 	return expand;
2635 }
2636 
2637 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2638 				  struct ocrdma_qp *qp, int status)
2639 {
2640 	ibwc->opcode = IB_WC_RECV;
2641 	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2642 	ocrdma_hwq_inc_tail(&qp->rq);
2643 
2644 	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2645 }
2646 
2647 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2648 				  struct ocrdma_qp *qp, int status)
2649 {
2650 	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2651 	ocrdma_hwq_inc_tail(&qp->sq);
2652 
2653 	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2654 }
2655 
2656 
2657 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2658 				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2659 				 bool *polled, bool *stop)
2660 {
2661 	bool expand;
2662 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2663 	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2664 		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2665 	if (status < OCRDMA_MAX_CQE_ERR)
2666 		atomic_inc(&dev->cqe_err_stats[status]);
2667 
2668 	/* when the hw sq is empty but the rq is not empty, keep
2669 	 * the cqe in order to get the cq event again.
2670 	 */
2671 	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2672 		/* when the cq for the rq and sq is the same, it is safe
2673 		 * to return a flush cqe for RQEs.
2674 		 */
2675 		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2676 			*polled = true;
2677 			status = OCRDMA_CQE_WR_FLUSH_ERR;
2678 			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2679 		} else {
2680 			/* stop processing further cqes as this cqe is used
2681 			 * for triggering the cq event on the buddy cq of the
2682 			 * RQ. When the QP is destroyed, this cqe will be
2683 			 * removed from the cq's hardware q.
2684 			 */
2685 			*polled = false;
2686 			*stop = true;
2687 			expand = false;
2688 		}
2689 	} else if (is_hw_sq_empty(qp)) {
2690 		/* Do nothing */
2691 		expand = false;
2692 		*polled = false;
2693 		*stop = false;
2694 	} else {
2695 		*polled = true;
2696 		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2697 	}
2698 	return expand;
2699 }
2700 
2701 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2702 				     struct ocrdma_cqe *cqe,
2703 				     struct ib_wc *ibwc, bool *polled)
2704 {
2705 	bool expand = false;
2706 	int tail = qp->sq.tail;
2707 	u32 wqe_idx;
2708 
2709 	if (!qp->wqe_wr_id_tbl[tail].signaled) {
2710 		*polled = false;    /* WC cannot be consumed yet */
2711 	} else {
2712 		ibwc->status = IB_WC_SUCCESS;
2713 		ibwc->wc_flags = 0;
2714 		ibwc->qp = &qp->ibqp;
2715 		ocrdma_update_wc(qp, ibwc, tail);
2716 		*polled = true;
2717 	}
2718 	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2719 			OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2720 	if (tail != wqe_idx)
2721 		expand = true; /* Coalesced CQE can't be consumed yet */
2722 
2723 	ocrdma_hwq_inc_tail(&qp->sq);
2724 	return expand;
2725 }
2726 
2727 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2728 			     struct ib_wc *ibwc, bool *polled, bool *stop)
2729 {
2730 	int status;
2731 	bool expand;
2732 
2733 	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2734 		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2735 
2736 	if (status == OCRDMA_CQE_SUCCESS)
2737 		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2738 	else
2739 		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2740 	return expand;
2741 }
2742 
2743 static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2744 {
2745 	int status;
2746 
2747 	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2748 		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2749 	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2750 						OCRDMA_CQE_SRCQP_MASK;
2751 	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2752 						OCRDMA_CQE_PKEY_MASK;
2753 	ibwc->wc_flags = IB_WC_GRH;
2754 	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2755 					OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2756 	return status;
2757 }
2758 
2759 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2760 				       struct ocrdma_cqe *cqe,
2761 				       struct ocrdma_qp *qp)
2762 {
2763 	unsigned long flags;
2764 	struct ocrdma_srq *srq;
2765 	u32 wqe_idx;
2766 
2767 	srq = get_ocrdma_srq(qp->ibqp.srq);
2768 	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2769 		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2770 	if (wqe_idx < 1)
2771 		BUG();
2772 
2773 	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2774 	spin_lock_irqsave(&srq->q_lock, flags);
2775 	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
2776 	spin_unlock_irqrestore(&srq->q_lock, flags);
2777 	ocrdma_hwq_inc_tail(&srq->rq);
2778 }
2779 
2780 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2781 				struct ib_wc *ibwc, bool *polled, bool *stop,
2782 				int status)
2783 {
2784 	bool expand;
2785 	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2786 
2787 	if (status < OCRDMA_MAX_CQE_ERR)
2788 		atomic_inc(&dev->cqe_err_stats[status]);
2789 
2790 	/* when the hw rq is empty but the sq is not empty, keep
2791 	 * the cqe in order to get the cq event again.
2792 	 */
2793 	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2794 		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2795 			*polled = true;
2796 			status = OCRDMA_CQE_WR_FLUSH_ERR;
2797 			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2798 		} else {
2799 			*polled = false;
2800 			*stop = true;
2801 			expand = false;
2802 		}
2803 	} else if (is_hw_rq_empty(qp)) {
2804 		/* Do nothing */
2805 		expand = false;
2806 		*polled = false;
2807 		*stop = false;
2808 	} else {
2809 		*polled = true;
2810 		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2811 	}
2812 	return expand;
2813 }
2814 
2815 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2816 				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2817 {
2818 	ibwc->opcode = IB_WC_RECV;
2819 	ibwc->qp = &qp->ibqp;
2820 	ibwc->status = IB_WC_SUCCESS;
2821 
2822 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2823 		ocrdma_update_ud_rcqe(ibwc, cqe);
2824 	else
2825 		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2826 
2827 	if (is_cqe_imm(cqe)) {
2828 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2829 		ibwc->wc_flags |= IB_WC_WITH_IMM;
2830 	} else if (is_cqe_wr_imm(cqe)) {
2831 		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2832 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2833 		ibwc->wc_flags |= IB_WC_WITH_IMM;
2834 	} else if (is_cqe_invalidated(cqe)) {
2835 		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2836 		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2837 	}
2838 	if (qp->ibqp.srq) {
2839 		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2840 	} else {
2841 		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2842 		ocrdma_hwq_inc_tail(&qp->rq);
2843 	}
2844 }
2845 
2846 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2847 			     struct ib_wc *ibwc, bool *polled, bool *stop)
2848 {
2849 	int status;
2850 	bool expand = false;
2851 
2852 	ibwc->wc_flags = 0;
2853 	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2854 		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2855 					OCRDMA_CQE_UD_STATUS_MASK) >>
2856 					OCRDMA_CQE_UD_STATUS_SHIFT;
2857 	} else {
2858 		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2859 			     OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2860 	}
2861 
2862 	if (status == OCRDMA_CQE_SUCCESS) {
2863 		*polled = true;
2864 		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2865 	} else {
2866 		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2867 					      status);
2868 	}
2869 	return expand;
2870 }
2871 
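/* Track CQE ownership: when the CQ uses phase change, flip the expected
 * phase bit each time the ring wraps around; otherwise clear the valid
 * bit of the consumed CQE directly.
 */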
2872 static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2873 				   u16 cur_getp)
2874 {
2875 	if (cq->phase_change) {
2876 		if (cur_getp == 0)
2877 			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
2878 	} else {
2879 		/* clear valid bit */
2880 		cqe->flags_status_srcqpn = 0;
2881 	}
2882 }
2883 
2884 static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2885 			    struct ib_wc *ibwc)
2886 {
2887 	u16 qpn = 0;
2888 	int i = 0;
2889 	bool expand = false;
2890 	int polled_hw_cqes = 0;
2891 	struct ocrdma_qp *qp = NULL;
2892 	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
2893 	struct ocrdma_cqe *cqe;
2894 	u16 cur_getp; bool polled = false; bool stop = false;
2895 
2896 	cur_getp = cq->getp;
2897 	while (num_entries) {
2898 		cqe = cq->va + cur_getp;
2899 		/* check whether valid cqe or not */
2900 		if (!is_cqe_valid(cq, cqe))
2901 			break;
2902 		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2903 		/* ignore discarded cqe */
2904 		if (qpn == 0)
2905 			goto skip_cqe;
2906 		qp = dev->qp_tbl[qpn];
2907 		BUG_ON(qp == NULL);
2908 
2909 		if (is_cqe_for_sq(cqe)) {
2910 			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2911 						  &stop);
2912 		} else {
2913 			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2914 						  &stop);
2915 		}
2916 		if (expand)
2917 			goto expand_cqe;
2918 		if (stop)
2919 			goto stop_cqe;
2920 		/* clear qpn to avoid duplicate processing by discard_cqe() */
2921 		cqe->cmn.qpn = 0;
2922 skip_cqe:
2923 		polled_hw_cqes += 1;
2924 		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2925 		ocrdma_change_cq_phase(cq, cqe, cur_getp);
2926 expand_cqe:
2927 		if (polled) {
2928 			num_entries -= 1;
2929 			i += 1;
2930 			ibwc = ibwc + 1;
2931 			polled = false;
2932 		}
2933 	}
2934 stop_cqe:
2935 	cq->getp = cur_getp;
2936 	if (cq->deferred_arm) {
2937 		ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
2938 				  polled_hw_cqes);
2939 		cq->deferred_arm = false;
2940 		cq->deferred_sol = false;
2941 	} else {
2942 		/* We need to pop the CQE. No need to arm */
2943 		ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
2944 				  polled_hw_cqes);
2945 		cq->deferred_sol = false;
2946 	}
2947 
2948 	return i;
2949 }
2950 
2951 /* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2952 static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2953 			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
2954 {
2955 	int err_cqes = 0;
2956 
2957 	while (num_entries) {
2958 		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2959 			break;
2960 		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2961 			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2962 			ocrdma_hwq_inc_tail(&qp->sq);
2963 		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2964 			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2965 			ocrdma_hwq_inc_tail(&qp->rq);
2966 		} else {
2967 			return err_cqes;
2968 		}
2969 		ibwc->byte_len = 0;
2970 		ibwc->status = IB_WC_WR_FLUSH_ERR;
2971 		ibwc = ibwc + 1;
2972 		err_cqes += 1;
2973 		num_entries -= 1;
2974 	}
2975 	return err_cqes;
2976 }
2977 
2978 int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2979 {
2980 	int cqes_to_poll = num_entries;
2981 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2982 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2983 	int num_os_cqe = 0, err_cqes = 0;
2984 	struct ocrdma_qp *qp;
2985 	unsigned long flags;
2986 
2987 	/* poll cqes from adapter CQ */
2988 	spin_lock_irqsave(&cq->cq_lock, flags);
2989 	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2990 	spin_unlock_irqrestore(&cq->cq_lock, flags);
2991 	cqes_to_poll -= num_os_cqe;
2992 
2993 	if (cqes_to_poll) {
2994 		wc = wc + num_os_cqe;
2995 		/* the adapter returns a single error cqe when a qp moves to
2996 		 * the error state. So insert error cqes with wc_status
2997 		 * FLUSHED for the pending WQEs and RQEs of any QP whose
2998 		 * SQ or RQ uses this CQ.
2999 		 */
3000 		spin_lock_irqsave(&dev->flush_q_lock, flags);
3001 		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
3002 			if (cqes_to_poll == 0)
3003 				break;
3004 			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
3005 			cqes_to_poll -= err_cqes;
3006 			num_os_cqe += err_cqes;
3007 			wc = wc + err_cqes;
3008 		}
3009 		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
3010 	}
3011 	return num_os_cqe;
3012 }
3013 
3014 int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
3015 {
3016 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
3017 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
3018 	u16 cq_id;
3019 	unsigned long flags;
3020 	bool arm_needed = false, sol_needed = false;
3021 
3022 	cq_id = cq->id;
3023 
3024 	spin_lock_irqsave(&cq->cq_lock, flags);
3025 	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
3026 		arm_needed = true;
3027 	if (cq_flags & IB_CQ_SOLICITED)
3028 		sol_needed = true;
3029 
3030 	if (cq->first_arm) {
3031 		ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
3032 		cq->first_arm = false;
3033 	}
3034 
3035 	cq->deferred_arm = true;
3036 	cq->deferred_sol = sol_needed;
3037 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3038 
3039 	return 0;
3040 }
3041 
3042 struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
3043 			      enum ib_mr_type mr_type,
3044 			      u32 max_num_sg)
3045 {
3046 	int status;
3047 	struct ocrdma_mr *mr;
3048 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
3049 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
3050 
3051 	if (mr_type != IB_MR_TYPE_MEM_REG)
3052 		return ERR_PTR(-EINVAL);
3053 
3054 	if (max_num_sg > dev->attr.max_pages_per_frmr)
3055 		return ERR_PTR(-EINVAL);
3056 
3057 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3058 	if (!mr)
3059 		return ERR_PTR(-ENOMEM);
3060 
3061 	status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
3062 	if (status)
3063 		goto pbl_err;
3064 	mr->hwmr.fr_mr = 1;
3065 	mr->hwmr.remote_rd = 0;
3066 	mr->hwmr.remote_wr = 0;
3067 	mr->hwmr.local_rd = 0;
3068 	mr->hwmr.local_wr = 0;
3069 	mr->hwmr.mw_bind = 0;
3070 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
3071 	if (status)
3072 		goto pbl_err;
3073 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
3074 	if (status)
3075 		goto mbx_err;
3076 	mr->ibmr.rkey = mr->hwmr.lkey;
3077 	mr->ibmr.lkey = mr->hwmr.lkey;
3078 	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
3079 		(unsigned long) mr;
3080 	return &mr->ibmr;
3081 mbx_err:
3082 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
3083 pbl_err:
3084 	kfree(mr);
3085 	return ERR_PTR(-ENOMEM);
3086 }
3087 
3088 struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
3089 							  *ibdev,
3090 							  int page_list_len)
3091 {
3092 	struct ib_fast_reg_page_list *frmr_list;
3093 	int size;
3094 
3095 	size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
3096 	frmr_list = kzalloc(size, GFP_KERNEL);
3097 	if (!frmr_list)
3098 		return ERR_PTR(-ENOMEM);
3099 	frmr_list->page_list = (u64 *)(frmr_list + 1);
3100 	return frmr_list;
3101 }
3102 
3103 void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
3104 {
3105 	kfree(page_list);
3106 }
3107 
3108 #define MAX_KERNEL_PBE_SIZE 65536
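/* Pick the PBE size (the smallest rounded-up buffer size, capped at
 * MAX_KERNEL_PBE_SIZE) and return the number of PBEs needed, or 0 if an
 * intermediate buffer is not page aligned.
 */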
3109 static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
3110 				    int buf_cnt, u32 *pbe_size)
3111 {
3112 	u64 total_size = 0;
3113 	u64 buf_size = 0;
3114 	int i;
3115 	*pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
3116 	*pbe_size = roundup_pow_of_two(*pbe_size);
3117 
3118 	/* find the smallest PBE size that we can have */
3119 	for (i = 0; i < buf_cnt; i++) {
3120 		/* first addr may not be page aligned, so ignore checking */
3121 		/* the first addr may not be page aligned, so skip the check for it */
3122 				 (buf_list[i].size & ~PAGE_MASK))) {
3123 			return 0;
3124 		}
3125 
3126 		/* if the configured PBE size is greater than the chosen one,
3127 		 * reduce the PBE size.
3128 		 */
3129 		buf_size = roundup(buf_list[i].size, PAGE_SIZE);
3130 		/* pbe_size has to be a power-of-two multiple of 4K: 1, 2, 4, 8, ... */
3131 		buf_size = roundup_pow_of_two(buf_size);
3132 		if (*pbe_size > buf_size)
3133 			*pbe_size = buf_size;
3134 
3135 		total_size += buf_size;
3136 	}
3137 	*pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
3138 	    (MAX_KERNEL_PBE_SIZE) : (*pbe_size);
3139 
3140 	/* num_pbes = total_size / (*pbe_size);  this is implemented below. */
3141 
3142 	return total_size >> ilog2(*pbe_size);
3143 }
3144 
3145 static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
3146 			      u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
3147 			      struct ocrdma_hw_mr *hwmr)
3148 {
3149 	int i;
3150 	int idx;
3151 	int pbes_per_buf = 0;
3152 	u64 buf_addr = 0;
3153 	int num_pbes;
3154 	struct ocrdma_pbe *pbe;
3155 	int total_num_pbes = 0;
3156 
3157 	if (!hwmr->num_pbes)
3158 		return;
3159 
3160 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
3161 	num_pbes = 0;
3162 
3163 	/* go through the OS phy regions & fill hw pbe entries into pbls. */
3164 	for (i = 0; i < ib_buf_cnt; i++) {
3165 		buf_addr = buf_list[i].addr;
3166 		pbes_per_buf =
3167 		    roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
3168 		    pbe_size;
3169 		hwmr->len += buf_list[i].size;
3170 		/* one OS buf can need more than one pbe when
3171 		 * buffers are of different sizes.
3172 		 * split the ib_buf into one or more pbes.
3173 		 */
3174 		for (idx = 0; idx < pbes_per_buf; idx++) {
3175 			/* we always program page-aligned addresses;
3176 			 * the first unaligned address is taken care of by the fbo.
3177 			 */
3178 			if (i == 0) {
3179 				/* for a non-zero fbo, assign the
3180 				 * start of the page.
3181 				 */
3182 				pbe->pa_lo =
3183 				    cpu_to_le32((u32) (buf_addr & PAGE_MASK));
3184 				pbe->pa_hi =
3185 				    cpu_to_le32((u32) upper_32_bits(buf_addr));
3186 			} else {
3187 				pbe->pa_lo =
3188 				    cpu_to_le32((u32) (buf_addr & 0xffffffff));
3189 				pbe->pa_hi =
3190 				    cpu_to_le32((u32) upper_32_bits(buf_addr));
3191 			}
3192 			buf_addr += pbe_size;
3193 			num_pbes += 1;
3194 			total_num_pbes += 1;
3195 			pbe++;
3196 
3197 			if (total_num_pbes == hwmr->num_pbes)
3198 				goto mr_tbl_done;
3199 			/* if the pbl is full of pbes,
3200 			 * move to the next pbl.
3201 			 */
3202 			if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
3203 				pbl_tbl++;
3204 				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
3205 				num_pbes = 0;
3206 			}
3207 		}
3208 	}
3209 mr_tbl_done:
3210 	return;
3211 }
3212 
3213 struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
3214 				   struct ib_phys_buf *buf_list,
3215 				   int buf_cnt, int acc, u64 *iova_start)
3216 {
3217 	int status = -ENOMEM;
3218 	struct ocrdma_mr *mr;
3219 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
3220 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
3221 	u32 num_pbes;
3222 	u32 pbe_size = 0;
3223 
3224 	if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
3225 		return ERR_PTR(-EINVAL);
3226 
3227 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3228 	if (!mr)
3229 		return ERR_PTR(status);
3230 
3231 	num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
3232 	if (num_pbes == 0) {
3233 		status = -EINVAL;
3234 		goto pbl_err;
3235 	}
3236 	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
3237 	if (status)
3238 		goto pbl_err;
3239 
3240 	mr->hwmr.pbe_size = pbe_size;
3241 	mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
3242 	mr->hwmr.va = *iova_start;
3243 	mr->hwmr.local_rd = 1;
3244 	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3245 	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3246 	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3247 	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3248 	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
3249 
3250 	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
3251 	if (status)
3252 		goto pbl_err;
3253 	build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
3254 			  &mr->hwmr);
3255 	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
3256 	if (status)
3257 		goto mbx_err;
3258 
3259 	mr->ibmr.lkey = mr->hwmr.lkey;
3260 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
3261 		mr->ibmr.rkey = mr->hwmr.lkey;
3262 	return &mr->ibmr;
3263 
3264 mbx_err:
3265 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
3266 pbl_err:
3267 	kfree(mr);
3268 	return ERR_PTR(status);
3269 }
3270