xref: /linux/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c (revision cfda8617e22a8bf217a613d0b3ba3a38778443ba)
1 /*
2  * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of EITHER the GNU General Public License
6  * version 2 as published by the Free Software Foundation or the BSD
7  * 2-Clause License. This program is distributed in the hope that it
8  * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
9  * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
10  * See the GNU General Public License version 2 for more details at
11  * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program available in the file COPYING in the main
15  * directory of this source tree.
16  *
17  * The BSD 2-Clause License
18  *
19  *     Redistribution and use in source and binary forms, with or
20  *     without modification, are permitted provided that the following
21  *     conditions are met:
22  *
23  *      - Redistributions of source code must retain the above
24  *        copyright notice, this list of conditions and the following
25  *        disclaimer.
26  *
27  *      - Redistributions in binary form must reproduce the above
28  *        copyright notice, this list of conditions and the following
29  *        disclaimer in the documentation and/or other materials
30  *        provided with the distribution.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36  * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
37  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
38  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
39  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43  * OF THE POSSIBILITY OF SUCH DAMAGE.
44  */
45 
46 #include <asm/page.h>
47 #include <linux/io.h>
48 #include <linux/wait.h>
49 #include <rdma/ib_addr.h>
50 #include <rdma/ib_smi.h>
51 #include <rdma/ib_user_verbs.h>
52 
53 #include "pvrdma.h"
54 
55 static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
56 				struct pvrdma_qp *qp);
57 
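/* Resolve the driver-private CQ structures behind a QP's send/recv CQs. */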
58 static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
59 			   struct pvrdma_cq **recv_cq)
60 {
61 	*send_cq = to_vcq(qp->ibqp.send_cq);
62 	*recv_cq = to_vcq(qp->ibqp.recv_cq);
63 }
64 
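/*
 * Lock a QP's send and receive CQs. Distinct CQs are always taken in
 * ascending cq_handle order so that concurrent callers cannot deadlock;
 * when both point at the same CQ only one lock is taken and the dummy
 * __acquire() keeps sparse's lock annotations balanced.
 * pvrdma_unlock_cqs() below undoes this in the reverse order.
 */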
65 static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
66 			    unsigned long *scq_flags,
67 			    unsigned long *rcq_flags)
68 	__acquires(scq->cq_lock) __acquires(rcq->cq_lock)
69 {
70 	if (scq == rcq) {
71 		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
72 		__acquire(rcq->cq_lock);
73 	} else if (scq->cq_handle < rcq->cq_handle) {
74 		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
75 		spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
76 					 SINGLE_DEPTH_NESTING);
77 	} else {
78 		spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
79 		spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
80 					 SINGLE_DEPTH_NESTING);
81 	}
82 }
83 
84 static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
85 			      unsigned long *scq_flags,
86 			      unsigned long *rcq_flags)
87 	__releases(scq->cq_lock) __releases(rcq->cq_lock)
88 {
89 	if (scq == rcq) {
90 		__release(rcq->cq_lock);
91 		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
92 	} else if (scq->cq_handle < rcq->cq_handle) {
93 		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
94 		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
95 	} else {
96 		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
97 		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
98 	}
99 }
100 
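/*
 * Return a QP to the RESET state: flush its stale CQEs from both CQs while
 * holding the CQ locks, then rewind the kernel ring-state indices. User-space
 * QPs manage their own ring state, hence the NULL checks below.
 */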
101 static void pvrdma_reset_qp(struct pvrdma_qp *qp)
102 {
103 	struct pvrdma_cq *scq, *rcq;
104 	unsigned long scq_flags, rcq_flags;
105 
106 	/* Clean up cqes */
107 	get_cqs(qp, &scq, &rcq);
108 	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
109 
110 	_pvrdma_flush_cqe(qp, scq);
111 	if (scq != rcq)
112 		_pvrdma_flush_cqe(qp, rcq);
113 
114 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
115 
116 	/*
117 	 * Reset the queue pair. The NULL checks are needed because user-space
118 	 * queue pairs do not have kernel ring state.
119 	 */
120 	if (qp->rq.ring) {
121 		atomic_set(&qp->rq.ring->cons_head, 0);
122 		atomic_set(&qp->rq.ring->prod_tail, 0);
123 	}
124 	if (qp->sq.ring) {
125 		atomic_set(&qp->sq.ring->cons_head, 0);
126 		atomic_set(&qp->sq.ring->prod_tail, 0);
127 	}
128 }
129 
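/*
 * Validate the requested receive queue size against the device caps, round
 * the WR and SGE counts up to powers of two, write the granted values back
 * into @req_cap and work out how many pages the receive queue needs.
 */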
130 static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
131 			      struct ib_qp_cap *req_cap,
132 			      struct pvrdma_qp *qp)
133 {
134 	if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
135 	    req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
136 		dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
137 		return -EINVAL;
138 	}
139 
140 	qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
141 	qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));
142 
143 	/* Write back */
144 	req_cap->max_recv_wr = qp->rq.wqe_cnt;
145 	req_cap->max_recv_sge = qp->rq.max_sg;
146 
147 	qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
148 					     sizeof(struct pvrdma_sge) *
149 					     qp->rq.max_sg);
150 	qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
151 			  PAGE_SIZE;
152 
153 	return 0;
154 }
155 
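/*
 * As pvrdma_set_rq_size(), but for the send queue; npages_send also accounts
 * for the PVRDMA_QP_NUM_HEADER_PAGES header page(s) that precede the send
 * WQEs.
 */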
156 static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
157 			      struct pvrdma_qp *qp)
158 {
159 	if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
160 	    req_cap->max_send_sge > dev->dsr->caps.max_sge) {
161 		dev_warn(&dev->pdev->dev, "send queue size invalid\n");
162 		return -EINVAL;
163 	}
164 
165 	qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
166 	qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));
167 
168 	/* Write back */
169 	req_cap->max_send_wr = qp->sq.wqe_cnt;
170 	req_cap->max_send_sge = qp->sq.max_sg;
171 
172 	qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
173 					     sizeof(struct pvrdma_sge) *
174 					     qp->sq.max_sg);
175 	/* Note: one extra page for the header. */
176 	qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
177 			  (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
178 								PAGE_SIZE;
179 
180 	return 0;
181 }
182 
183 /**
184  * pvrdma_create_qp - create queue pair
185  * @pd: protection domain
186  * @init_attr: queue pair attributes
187  * @udata: user data
188  *
189  * @return: the ib_qp pointer on success, otherwise returns an error pointer (ERR_PTR).
190  */
191 struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
192 			       struct ib_qp_init_attr *init_attr,
193 			       struct ib_udata *udata)
194 {
195 	struct pvrdma_qp *qp = NULL;
196 	struct pvrdma_dev *dev = to_vdev(pd->device);
197 	union pvrdma_cmd_req req;
198 	union pvrdma_cmd_resp rsp;
199 	struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
200 	struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
201 	struct pvrdma_cmd_create_qp_resp_v2 *resp_v2 = &rsp.create_qp_resp_v2;
202 	struct pvrdma_create_qp ucmd;
203 	struct pvrdma_create_qp_resp qp_resp = {};
204 	unsigned long flags;
205 	int ret;
206 	bool is_srq = !!init_attr->srq;
207 
208 	if (init_attr->create_flags) {
209 		dev_warn(&dev->pdev->dev,
210 			 "invalid create queuepair flags %#x\n",
211 			 init_attr->create_flags);
212 		return ERR_PTR(-EINVAL);
213 	}
214 
215 	if (init_attr->qp_type != IB_QPT_RC &&
216 	    init_attr->qp_type != IB_QPT_UD &&
217 	    init_attr->qp_type != IB_QPT_GSI) {
218 		dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
219 			 init_attr->qp_type);
220 		return ERR_PTR(-EINVAL);
221 	}
222 
223 	if (is_srq && !dev->dsr->caps.max_srq) {
224 		dev_warn(&dev->pdev->dev,
225 			 "SRQs not supported by device\n");
226 		return ERR_PTR(-EINVAL);
227 	}
228 
229 	if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
230 		return ERR_PTR(-ENOMEM);
231 
232 	switch (init_attr->qp_type) {
233 	case IB_QPT_GSI:
234 		if (init_attr->port_num == 0 ||
235 		    init_attr->port_num > pd->device->phys_port_cnt ||
236 		    udata) {
237 			dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
238 			ret = -EINVAL;
239 			goto err_qp;
240 		}
241 		/* fall through */
242 	case IB_QPT_RC:
243 	case IB_QPT_UD:
244 		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
245 		if (!qp) {
246 			ret = -ENOMEM;
247 			goto err_qp;
248 		}
249 
250 		spin_lock_init(&qp->sq.lock);
251 		spin_lock_init(&qp->rq.lock);
252 		mutex_init(&qp->mutex);
253 		refcount_set(&qp->refcnt, 1);
254 		init_completion(&qp->free);
255 
256 		qp->state = IB_QPS_RESET;
257 		qp->is_kernel = !udata;
258 
259 		if (!qp->is_kernel) {
260 			dev_dbg(&dev->pdev->dev,
261 				"create queuepair from user space\n");
262 
263 			if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
264 				ret = -EFAULT;
265 				goto err_qp;
266 			}
267 
268 			/* Does userspace support returning both qpn and qp handle? */
269 			if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION &&
270 			    udata->outlen < sizeof(qp_resp)) {
271 				dev_warn(&dev->pdev->dev,
272 					 "create queuepair not supported\n");
273 				ret = -EOPNOTSUPP;
274 				goto err_qp;
275 			}
276 
277 			if (!is_srq) {
278 				/* Map the user-space receive queue buffer. */
279 				qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
280 							ucmd.rbuf_size, 0);
281 				if (IS_ERR(qp->rumem)) {
282 					ret = PTR_ERR(qp->rumem);
283 					goto err_qp;
284 				}
285 				qp->srq = NULL;
286 			} else {
287 				qp->rumem = NULL;
288 				qp->srq = to_vsrq(init_attr->srq);
289 			}
290 
291 			qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
292 						ucmd.sbuf_size, 0);
293 			if (IS_ERR(qp->sumem)) {
294 				if (!is_srq)
295 					ib_umem_release(qp->rumem);
296 				ret = PTR_ERR(qp->sumem);
297 				goto err_qp;
298 			}
299 
300 			qp->npages_send = ib_umem_page_count(qp->sumem);
301 			if (!is_srq)
302 				qp->npages_recv = ib_umem_page_count(qp->rumem);
303 			else
304 				qp->npages_recv = 0;
305 			qp->npages = qp->npages_send + qp->npages_recv;
306 		} else {
307 			ret = pvrdma_set_sq_size(to_vdev(pd->device),
308 						 &init_attr->cap, qp);
309 			if (ret)
310 				goto err_qp;
311 
312 			ret = pvrdma_set_rq_size(to_vdev(pd->device),
313 						 &init_attr->cap, qp);
314 			if (ret)
315 				goto err_qp;
316 
317 			qp->npages = qp->npages_send + qp->npages_recv;
318 
319 			/* Skip header page. */
320 			qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
321 
322 			/* Recv queue pages are after send pages. */
323 			qp->rq.offset = qp->npages_send * PAGE_SIZE;
324 		}
325 
326 		if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
327 			dev_warn(&dev->pdev->dev,
328 				 "overflow pages in queuepair\n");
329 			ret = -EINVAL;
330 			goto err_umem;
331 		}
332 
333 		ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
334 					   qp->is_kernel);
335 		if (ret) {
336 			dev_warn(&dev->pdev->dev,
337 				 "could not allocate page directory\n");
338 			goto err_umem;
339 		}
340 
341 		if (!qp->is_kernel) {
342 			pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
343 			if (!is_srq)
344 				pvrdma_page_dir_insert_umem(&qp->pdir,
345 							    qp->rumem,
346 							    qp->npages_send);
347 		} else {
348 			/* Ring state is always the first page. */
349 			qp->sq.ring = qp->pdir.pages[0];
350 			qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
351 		}
352 		break;
353 	default:
354 		ret = -EINVAL;
355 		goto err_qp;
356 	}
357 
358 	/* Inline data is not supported. */
359 	init_attr->cap.max_inline_data = 0;
360 
361 	memset(cmd, 0, sizeof(*cmd));
362 	cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
363 	cmd->pd_handle = to_vpd(pd)->pd_handle;
364 	cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
365 	cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
366 	if (is_srq)
367 		cmd->srq_handle = to_vsrq(init_attr->srq)->srq_handle;
368 	else
369 		cmd->srq_handle = 0;
370 	cmd->max_send_wr = init_attr->cap.max_send_wr;
371 	cmd->max_recv_wr = init_attr->cap.max_recv_wr;
372 	cmd->max_send_sge = init_attr->cap.max_send_sge;
373 	cmd->max_recv_sge = init_attr->cap.max_recv_sge;
374 	cmd->max_inline_data = init_attr->cap.max_inline_data;
375 	cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
376 	cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
377 	cmd->is_srq = is_srq;
378 	cmd->lkey = 0;
379 	cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
380 	cmd->total_chunks = qp->npages;
381 	cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
382 	cmd->pdir_dma = qp->pdir.dir_dma;
383 
384 	dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
385 		cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
386 		cmd->max_recv_sge);
387 
388 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
389 	if (ret < 0) {
390 		dev_warn(&dev->pdev->dev,
391 			 "could not create queuepair, error: %d\n", ret);
392 		goto err_pdir;
393 	}
394 
395 	/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
396 	qp->port = init_attr->port_num;
397 
398 	if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION) {
399 		qp->ibqp.qp_num = resp_v2->qpn;
400 		qp->qp_handle = resp_v2->qp_handle;
401 	} else {
402 		qp->ibqp.qp_num = resp->qpn;
403 		qp->qp_handle = resp->qpn;
404 	}
405 
406 	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
407 	dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
408 	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
409 
410 	if (udata) {
411 		qp_resp.qpn = qp->ibqp.qp_num;
412 		qp_resp.qp_handle = qp->qp_handle;
413 
414 		if (ib_copy_to_udata(udata, &qp_resp,
415 				     min(udata->outlen, sizeof(qp_resp)))) {
416 			dev_warn(&dev->pdev->dev,
417 				 "failed to copy back udata\n");
418 			__pvrdma_destroy_qp(dev, qp);
419 			return ERR_PTR(-EINVAL);
420 		}
421 	}
422 
423 	return &qp->ibqp;
424 
425 err_pdir:
426 	pvrdma_page_dir_cleanup(dev, &qp->pdir);
427 err_umem:
428 	ib_umem_release(qp->rumem);
429 	ib_umem_release(qp->sumem);
430 err_qp:
431 	kfree(qp);
432 	atomic_dec(&dev->num_qps);
433 
434 	return ERR_PTR(ret);
435 }
436 
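/*
 * Final QP teardown: unhook the QP from the device's qp_tbl, drop our
 * reference and wait for any remaining holders (e.g. the event path) to
 * release theirs, then free the umems, the page directory and the QP itself
 * and give the slot back to num_qps.
 */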
437 static void _pvrdma_free_qp(struct pvrdma_qp *qp)
438 {
439 	unsigned long flags;
440 	struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
441 
442 	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
443 	dev->qp_tbl[qp->qp_handle] = NULL;
444 	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
445 
446 	if (refcount_dec_and_test(&qp->refcnt))
447 		complete(&qp->free);
448 	wait_for_completion(&qp->free);
449 
450 	ib_umem_release(qp->rumem);
451 	ib_umem_release(qp->sumem);
452 
453 	pvrdma_page_dir_cleanup(dev, &qp->pdir);
454 
455 	kfree(qp);
456 
457 	atomic_dec(&dev->num_qps);
458 }
459 
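/*
 * Flush this QP's CQEs from both CQs while holding the CQ locks, so that a
 * concurrent poller never sees stale completions, then release the QP.
 */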
460 static void pvrdma_free_qp(struct pvrdma_qp *qp)
461 {
462 	struct pvrdma_cq *scq;
463 	struct pvrdma_cq *rcq;
464 	unsigned long scq_flags, rcq_flags;
465 
466 	/* In case cq is polling */
467 	get_cqs(qp, &scq, &rcq);
468 	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
469 
470 	_pvrdma_flush_cqe(qp, scq);
471 	if (scq != rcq)
472 		_pvrdma_flush_cqe(qp, rcq);
473 
474 	/*
475 	 * We're now unlocking the CQs before clearing out the qp handle. This
476 	 * should still be safe: we have destroyed the backend QP and flushed
477 	 * the CQEs, so there should be no other completions for this QP.
478 	 */
479 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
480 
481 	_pvrdma_free_qp(qp);
482 }
483 
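/*
 * Ask the device to destroy the QP identified by @qp_handle. A failure is
 * only logged; teardown of the driver-side state proceeds regardless.
 */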
484 static inline void _pvrdma_destroy_qp_work(struct pvrdma_dev *dev,
485 					   u32 qp_handle)
486 {
487 	union pvrdma_cmd_req req;
488 	struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
489 	int ret;
490 
491 	memset(cmd, 0, sizeof(*cmd));
492 	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
493 	cmd->qp_handle = qp_handle;
494 
495 	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
496 	if (ret < 0)
497 		dev_warn(&dev->pdev->dev,
498 			 "destroy queuepair failed, error: %d\n", ret);
499 }
500 
501 /**
502  * pvrdma_destroy_qp - destroy a queue pair
503  * @qp: the queue pair to destroy
504  * @udata: user data or null for kernel object
505  *
506  * @return: always 0.
507  */
508 int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
509 {
510 	struct pvrdma_qp *vqp = to_vqp(qp);
511 
512 	_pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle);
513 	pvrdma_free_qp(vqp);
514 
515 	return 0;
516 }
517 
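/*
 * Error-path teardown used by pvrdma_create_qp() once the device-side QP
 * already exists (e.g. when copying the response back to user space fails).
 */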
518 static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
519 				struct pvrdma_qp *qp)
520 {
521 	_pvrdma_destroy_qp_work(dev, qp->qp_handle);
522 	_pvrdma_free_qp(qp);
523 }
524 
525 /**
526  * pvrdma_modify_qp - modify queue pair attributes
527  * @ibqp: the queue pair
528  * @attr: the new attributes for the queue pair
529  * @attr_mask: attributes mask
530  * @udata: user data
531  *
532  * @return: 0 on success, otherwise returns an errno.
533  */
534 int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
535 		     int attr_mask, struct ib_udata *udata)
536 {
537 	struct pvrdma_dev *dev = to_vdev(ibqp->device);
538 	struct pvrdma_qp *qp = to_vqp(ibqp);
539 	union pvrdma_cmd_req req;
540 	union pvrdma_cmd_resp rsp;
541 	struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
542 	enum ib_qp_state cur_state, next_state;
543 	int ret;
544 
545 	/* Sanity checking; qp->mutex serializes attribute changes. */
546 	mutex_lock(&qp->mutex);
547 	cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
548 		qp->state;
549 	next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
550 
551 	if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
552 				attr_mask)) {
553 		ret = -EINVAL;
554 		goto out;
555 	}
556 
557 	if (attr_mask & IB_QP_PORT) {
558 		if (attr->port_num == 0 ||
559 		    attr->port_num > ibqp->device->phys_port_cnt) {
560 			ret = -EINVAL;
561 			goto out;
562 		}
563 	}
564 
565 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
566 		if (attr->min_rnr_timer > 31) {
567 			ret = -EINVAL;
568 			goto out;
569 		}
570 	}
571 
572 	if (attr_mask & IB_QP_PKEY_INDEX) {
573 		if (attr->pkey_index >= dev->dsr->caps.max_pkeys) {
574 			ret = -EINVAL;
575 			goto out;
576 		}
577 	}
578 
579 	if (attr_mask & IB_QP_QKEY)
580 		qp->qkey = attr->qkey;
581 
582 	if (cur_state == next_state && cur_state == IB_QPS_RESET) {
583 		ret = 0;
584 		goto out;
585 	}
586 
587 	qp->state = next_state;
588 	memset(cmd, 0, sizeof(*cmd));
589 	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP;
590 	cmd->qp_handle = qp->qp_handle;
591 	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
592 	cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state);
593 	cmd->attrs.cur_qp_state =
594 		ib_qp_state_to_pvrdma(attr->cur_qp_state);
595 	cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu);
596 	cmd->attrs.path_mig_state =
597 		ib_mig_state_to_pvrdma(attr->path_mig_state);
598 	cmd->attrs.qkey = attr->qkey;
599 	cmd->attrs.rq_psn = attr->rq_psn;
600 	cmd->attrs.sq_psn = attr->sq_psn;
601 	cmd->attrs.dest_qp_num = attr->dest_qp_num;
602 	cmd->attrs.qp_access_flags =
603 		ib_access_flags_to_pvrdma(attr->qp_access_flags);
604 	cmd->attrs.pkey_index = attr->pkey_index;
605 	cmd->attrs.alt_pkey_index = attr->alt_pkey_index;
606 	cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify;
607 	cmd->attrs.sq_draining = attr->sq_draining;
608 	cmd->attrs.max_rd_atomic = attr->max_rd_atomic;
609 	cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic;
610 	cmd->attrs.min_rnr_timer = attr->min_rnr_timer;
611 	cmd->attrs.port_num = attr->port_num;
612 	cmd->attrs.timeout = attr->timeout;
613 	cmd->attrs.retry_cnt = attr->retry_cnt;
614 	cmd->attrs.rnr_retry = attr->rnr_retry;
615 	cmd->attrs.alt_port_num = attr->alt_port_num;
616 	cmd->attrs.alt_timeout = attr->alt_timeout;
617 	ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap);
618 	rdma_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr);
619 	rdma_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr);
620 
621 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
622 	if (ret < 0) {
623 		dev_warn(&dev->pdev->dev,
624 			 "could not modify queuepair, error: %d\n", ret);
625 	} else if (rsp.hdr.err > 0) {
626 		dev_warn(&dev->pdev->dev,
627 			 "cannot modify queuepair, error: %d\n", rsp.hdr.err);
628 		ret = -EINVAL;
629 	}
630 
631 	if (ret == 0 && next_state == IB_QPS_RESET)
632 		pvrdma_reset_qp(qp);
633 
634 out:
635 	mutex_unlock(&qp->mutex);
636 
637 	return ret;
638 }
639 
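/*
 * WQEs are laid out back to back in the page directory starting at the
 * queue's byte offset, so the n-th send/receive WQE lives at
 * sq.offset + n * sq.wqe_size (resp. rq.offset + n * rq.wqe_size).
 */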
640 static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
641 {
642 	return pvrdma_page_dir_get_ptr(&qp->pdir,
643 				       qp->sq.offset + n * qp->sq.wqe_size);
644 }
645 
646 static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
647 {
648 	return pvrdma_page_dir_get_ptr(&qp->pdir,
649 				       qp->rq.offset + n * qp->rq.wqe_size);
650 }
651 
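/*
 * Build the fast-register segment of a send WQE header from an IB_WR_REG_MR
 * work request and push the MR's page list into its page directory.
 */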
652 static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr,
653 		       const struct ib_reg_wr *wr)
654 {
655 	struct pvrdma_user_mr *mr = to_vmr(wr->mr);
656 
657 	wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
658 	wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
659 	wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
660 	wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
661 	wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
662 	wqe_hdr->wr.fast_reg.access_flags = wr->access;
663 	wqe_hdr->wr.fast_reg.rkey = wr->key;
664 
665 	return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
666 						mr->npages);
667 }
668 
669 /**
670  * pvrdma_post_send - post send work request entries on a QP
671  * @ibqp: the QP
672  * @wr: work request list to post
673  * @bad_wr: the first bad WR returned
674  *
675  * @return: 0 on success, otherwise returns an errno.
676  */
677 int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
678 		     const struct ib_send_wr **bad_wr)
679 {
680 	struct pvrdma_qp *qp = to_vqp(ibqp);
681 	struct pvrdma_dev *dev = to_vdev(ibqp->device);
682 	unsigned long flags;
683 	struct pvrdma_sq_wqe_hdr *wqe_hdr;
684 	struct pvrdma_sge *sge;
685 	int i, ret;
686 
687 	/*
688 	 * In states lower than RTS, we can fail immediately. In other states,
689 	 * just post and let the device figure it out.
690 	 */
691 	if (qp->state < IB_QPS_RTS) {
692 		*bad_wr = wr;
693 		return -EINVAL;
694 	}
695 
696 	spin_lock_irqsave(&qp->sq.lock, flags);
697 
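	/*
	 * For each WR: claim the next free producer slot in the shared send
	 * ring, build the WQE header plus SGE list at that slot in the page
	 * directory, and publish it by advancing prod_tail. The UAR doorbell
	 * write after the loop tells the device to process the new WQEs.
	 */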
698 	while (wr) {
699 		unsigned int tail = 0;
700 
701 		if (unlikely(!pvrdma_idx_ring_has_space(
702 				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
703 			dev_warn_ratelimited(&dev->pdev->dev,
704 					     "send queue is full\n");
705 			*bad_wr = wr;
706 			ret = -ENOMEM;
707 			goto out;
708 		}
709 
710 		if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
711 			dev_warn_ratelimited(&dev->pdev->dev,
712 					     "send SGE overflow\n");
713 			*bad_wr = wr;
714 			ret = -EINVAL;
715 			goto out;
716 		}
717 
718 		if (unlikely(wr->opcode < 0)) {
719 			dev_warn_ratelimited(&dev->pdev->dev,
720 					     "invalid send opcode\n");
721 			*bad_wr = wr;
722 			ret = -EINVAL;
723 			goto out;
724 		}
725 
726 		/*
727 		 * Only support UD, RC.
728 		 * Need to check opcode table for thorough checking.
729 		 * opcode		_UD	_UC	_RC
730 		 * _SEND		x	x	x
731 		 * _SEND_WITH_IMM	x	x	x
732 		 * _RDMA_WRITE			x	x
733 		 * _RDMA_WRITE_WITH_IMM		x	x
734 		 * _LOCAL_INV			x	x
735 		 * _SEND_WITH_INV		x	x
736 		 * _RDMA_READ				x
737 		 * _ATOMIC_CMP_AND_SWP			x
738 		 * _ATOMIC_FETCH_AND_ADD		x
739 		 * _MASK_ATOMIC_CMP_AND_SWP		x
740 		 * _MASK_ATOMIC_FETCH_AND_ADD		x
741 		 * _REG_MR				x
742 		 *
743 		 */
744 		if (qp->ibqp.qp_type != IB_QPT_UD &&
745 		    qp->ibqp.qp_type != IB_QPT_RC &&
746 			wr->opcode != IB_WR_SEND) {
747 			dev_warn_ratelimited(&dev->pdev->dev,
748 					     "unsupported queuepair type\n");
749 			*bad_wr = wr;
750 			ret = -EINVAL;
751 			goto out;
752 		} else if (qp->ibqp.qp_type == IB_QPT_UD ||
753 			   qp->ibqp.qp_type == IB_QPT_GSI) {
754 			if (wr->opcode != IB_WR_SEND &&
755 			    wr->opcode != IB_WR_SEND_WITH_IMM) {
756 				dev_warn_ratelimited(&dev->pdev->dev,
757 						     "invalid send opcode\n");
758 				*bad_wr = wr;
759 				ret = -EINVAL;
760 				goto out;
761 			}
762 		}
763 
764 		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
765 		memset(wqe_hdr, 0, sizeof(*wqe_hdr));
766 		wqe_hdr->wr_id = wr->wr_id;
767 		wqe_hdr->num_sge = wr->num_sge;
768 		wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
769 		wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
770 		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
771 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
772 			wqe_hdr->ex.imm_data = wr->ex.imm_data;
773 
774 		if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
775 			*bad_wr = wr;
776 			ret = -EINVAL;
777 			goto out;
778 		}
779 
780 		switch (qp->ibqp.qp_type) {
781 		case IB_QPT_GSI:
782 		case IB_QPT_UD:
783 			if (unlikely(!ud_wr(wr)->ah)) {
784 				dev_warn_ratelimited(&dev->pdev->dev,
785 						     "invalid address handle\n");
786 				*bad_wr = wr;
787 				ret = -EINVAL;
788 				goto out;
789 			}
790 
791 			/*
792 			 * Use qkey from qp context if high order bit set,
793 			 * otherwise from work request.
794 			 */
795 			wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
796 			wqe_hdr->wr.ud.remote_qkey =
797 				ud_wr(wr)->remote_qkey & 0x80000000 ?
798 				qp->qkey : ud_wr(wr)->remote_qkey;
799 			wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;
800 
801 			break;
802 		case IB_QPT_RC:
803 			switch (wr->opcode) {
804 			case IB_WR_RDMA_READ:
805 			case IB_WR_RDMA_WRITE:
806 			case IB_WR_RDMA_WRITE_WITH_IMM:
807 				wqe_hdr->wr.rdma.remote_addr =
808 					rdma_wr(wr)->remote_addr;
809 				wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
810 				break;
811 			case IB_WR_LOCAL_INV:
812 			case IB_WR_SEND_WITH_INV:
813 				wqe_hdr->ex.invalidate_rkey =
814 					wr->ex.invalidate_rkey;
815 				break;
816 			case IB_WR_ATOMIC_CMP_AND_SWP:
817 			case IB_WR_ATOMIC_FETCH_AND_ADD:
818 				wqe_hdr->wr.atomic.remote_addr =
819 					atomic_wr(wr)->remote_addr;
820 				wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
821 				wqe_hdr->wr.atomic.compare_add =
822 					atomic_wr(wr)->compare_add;
823 				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
824 					wqe_hdr->wr.atomic.swap =
825 						atomic_wr(wr)->swap;
826 				break;
827 			case IB_WR_REG_MR:
828 				ret = set_reg_seg(wqe_hdr, reg_wr(wr));
829 				if (ret < 0) {
830 					dev_warn_ratelimited(&dev->pdev->dev,
831 							     "Failed to set fast register work request\n");
832 					*bad_wr = wr;
833 					goto out;
834 				}
835 				break;
836 			default:
837 				break;
838 			}
839 
840 			break;
841 		default:
842 			dev_warn_ratelimited(&dev->pdev->dev,
843 					     "invalid queuepair type\n");
844 			ret = -EINVAL;
845 			*bad_wr = wr;
846 			goto out;
847 		}
848 
849 		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
850 		for (i = 0; i < wr->num_sge; i++) {
851 			/* Need to check wqe_size 0 or max size */
852 			sge->addr = wr->sg_list[i].addr;
853 			sge->length = wr->sg_list[i].length;
854 			sge->lkey = wr->sg_list[i].lkey;
855 			sge++;
856 		}
857 
858 		/* Make sure wqe is written before index update */
859 		smp_wmb();
860 
861 		/* Update shared sq ring */
862 		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
863 				    qp->sq.wqe_cnt);
864 
865 		wr = wr->next;
866 	}
867 
868 	ret = 0;
869 
870 out:
871 	spin_unlock_irqrestore(&qp->sq.lock, flags);
872 
873 	if (!ret)
874 		pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);
875 
876 	return ret;
877 }
878 
879 /**
880  * pvrdma_post_recv - post receive work request entries on a QP
881  * @ibqp: the QP
882  * @wr: the work request list to post
883  * @bad_wr: the first bad WR returned
884  *
885  * @return: 0 on success, otherwise returns an errno.
886  */
887 int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
888 		     const struct ib_recv_wr **bad_wr)
889 {
890 	struct pvrdma_dev *dev = to_vdev(ibqp->device);
891 	unsigned long flags;
892 	struct pvrdma_qp *qp = to_vqp(ibqp);
893 	struct pvrdma_rq_wqe_hdr *wqe_hdr;
894 	struct pvrdma_sge *sge;
895 	int ret = 0;
896 	int i;
897 
898 	/*
899 	 * In the RESET state, we can fail immediately. For other states,
900 	 * just post and let the device figure it out.
901 	 */
902 	if (qp->state == IB_QPS_RESET) {
903 		*bad_wr = wr;
904 		return -EINVAL;
905 	}
906 
907 	if (qp->srq) {
908 		dev_warn(&dev->pdev->dev, "QP associated with SRQ\n");
909 		*bad_wr = wr;
910 		return -EINVAL;
911 	}
912 
913 	spin_lock_irqsave(&qp->rq.lock, flags);
914 
915 	while (wr) {
916 		unsigned int tail = 0;
917 
918 		if (unlikely(wr->num_sge > qp->rq.max_sg ||
919 			     wr->num_sge < 0)) {
920 			ret = -EINVAL;
921 			*bad_wr = wr;
922 			dev_warn_ratelimited(&dev->pdev->dev,
923 					     "recv SGE overflow\n");
924 			goto out;
925 		}
926 
927 		if (unlikely(!pvrdma_idx_ring_has_space(
928 				qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
929 			ret = -ENOMEM;
930 			*bad_wr = wr;
931 			dev_warn_ratelimited(&dev->pdev->dev,
932 					     "recv queue full\n");
933 			goto out;
934 		}
935 
936 		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
937 		wqe_hdr->wr_id = wr->wr_id;
938 		wqe_hdr->num_sge = wr->num_sge;
939 		wqe_hdr->total_len = 0;
940 
941 		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
942 		for (i = 0; i < wr->num_sge; i++) {
943 			sge->addr = wr->sg_list[i].addr;
944 			sge->length = wr->sg_list[i].length;
945 			sge->lkey = wr->sg_list[i].lkey;
946 			sge++;
947 		}
948 
949 		/* Make sure wqe is written before index update */
950 		smp_wmb();
951 
952 		/* Update shared rq ring */
953 		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
954 				    qp->rq.wqe_cnt);
955 
956 		wr = wr->next;
957 	}
958 
959 	spin_unlock_irqrestore(&qp->rq.lock, flags);
960 
961 	pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);
962 
963 	return ret;
964 
965 out:
966 	spin_unlock_irqrestore(&qp->rq.lock, flags);
967 
968 	return ret;
969 }
970 
971 /**
972  * pvrdma_query_qp - query a queue pair's attributes
973  * @ibqp: the queue pair to query
974  * @attr: the queue pair's attributes
975  * @attr_mask: attributes mask
976  * @init_attr: initial queue pair attributes
977  *
978  * @return: 0 on success, otherwise returns an errno.
979  */
980 int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
981 		    int attr_mask, struct ib_qp_init_attr *init_attr)
982 {
983 	struct pvrdma_dev *dev = to_vdev(ibqp->device);
984 	struct pvrdma_qp *qp = to_vqp(ibqp);
985 	union pvrdma_cmd_req req;
986 	union pvrdma_cmd_resp rsp;
987 	struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
988 	struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp;
989 	int ret = 0;
990 
991 	mutex_lock(&qp->mutex);
992 
993 	if (qp->state == IB_QPS_RESET) {
994 		attr->qp_state = IB_QPS_RESET;
995 		goto out;
996 	}
997 
998 	memset(cmd, 0, sizeof(*cmd));
999 	cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP;
1000 	cmd->qp_handle = qp->qp_handle;
1001 	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
1002 
1003 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
1004 	if (ret < 0) {
1005 		dev_warn(&dev->pdev->dev,
1006 			 "could not query queuepair, error: %d\n", ret);
1007 		goto out;
1008 	}
1009 
1010 	attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state);
1011 	attr->cur_qp_state =
1012 		pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state);
1013 	attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu);
1014 	attr->path_mig_state =
1015 		pvrdma_mig_state_to_ib(resp->attrs.path_mig_state);
1016 	attr->qkey = resp->attrs.qkey;
1017 	attr->rq_psn = resp->attrs.rq_psn;
1018 	attr->sq_psn = resp->attrs.sq_psn;
1019 	attr->dest_qp_num = resp->attrs.dest_qp_num;
1020 	attr->qp_access_flags =
1021 		pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags);
1022 	attr->pkey_index = resp->attrs.pkey_index;
1023 	attr->alt_pkey_index = resp->attrs.alt_pkey_index;
1024 	attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify;
1025 	attr->sq_draining = resp->attrs.sq_draining;
1026 	attr->max_rd_atomic = resp->attrs.max_rd_atomic;
1027 	attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic;
1028 	attr->min_rnr_timer = resp->attrs.min_rnr_timer;
1029 	attr->port_num = resp->attrs.port_num;
1030 	attr->timeout = resp->attrs.timeout;
1031 	attr->retry_cnt = resp->attrs.retry_cnt;
1032 	attr->rnr_retry = resp->attrs.rnr_retry;
1033 	attr->alt_port_num = resp->attrs.alt_port_num;
1034 	attr->alt_timeout = resp->attrs.alt_timeout;
1035 	pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap);
1036 	pvrdma_ah_attr_to_rdma(&attr->ah_attr, &resp->attrs.ah_attr);
1037 	pvrdma_ah_attr_to_rdma(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr);
1038 
1039 	qp->state = attr->qp_state;
1040 
1041 	ret = 0;
1042 
1043 out:
1044 	attr->cur_qp_state = attr->qp_state;
1045 
1046 	init_attr->event_handler = qp->ibqp.event_handler;
1047 	init_attr->qp_context = qp->ibqp.qp_context;
1048 	init_attr->send_cq = qp->ibqp.send_cq;
1049 	init_attr->recv_cq = qp->ibqp.recv_cq;
1050 	init_attr->srq = qp->ibqp.srq;
1051 	init_attr->xrcd = NULL;
1052 	init_attr->cap = attr->cap;
1053 	init_attr->sq_sig_type = 0;
1054 	init_attr->qp_type = qp->ibqp.qp_type;
1055 	init_attr->create_flags = 0;
1056 	init_attr->port_num = qp->port;
1057 
1058 	mutex_unlock(&qp->mutex);
1059 	return ret;
1060 }
1061