Lines matching full:qp (full-identifier search; each entry shows the source line number, the matching line, and the enclosing function)
56 struct pvrdma_qp *qp);
58 static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq, in get_cqs() argument
61 *send_cq = to_vcq(qp->ibqp.send_cq); in get_cqs()
62 *recv_cq = to_vcq(qp->ibqp.recv_cq); in get_cqs()
101 static void pvrdma_reset_qp(struct pvrdma_qp *qp) in pvrdma_reset_qp() argument
107 get_cqs(qp, &scq, &rcq); in pvrdma_reset_qp()
110 _pvrdma_flush_cqe(qp, scq); in pvrdma_reset_qp()
112 _pvrdma_flush_cqe(qp, rcq); in pvrdma_reset_qp()
120 if (qp->rq.ring) { in pvrdma_reset_qp()
121 atomic_set(&qp->rq.ring->cons_head, 0); in pvrdma_reset_qp()
122 atomic_set(&qp->rq.ring->prod_tail, 0); in pvrdma_reset_qp()
124 if (qp->sq.ring) { in pvrdma_reset_qp()
125 atomic_set(&qp->sq.ring->cons_head, 0); in pvrdma_reset_qp()
126 atomic_set(&qp->sq.ring->prod_tail, 0); in pvrdma_reset_qp()
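
The pvrdma_reset_qp() matches above show the two halves of a reset: flush the pending completions for this QP out of both CQs, then zero the consumer/producer indices of whichever rings the QP owns so they read as empty. A minimal user-space sketch of that index reset, using C11 atomics in place of the kernel's atomic_t (the struct and names are illustrative, not the driver's):

    #include <stdatomic.h>

    /* Illustrative stand-in for the driver's ring bookkeeping: one
     * producer/consumer index pair in front of a fixed slot array. */
    struct ring_state {
        atomic_uint cons_head;  /* next slot the consumer will read */
        atomic_uint prod_tail;  /* next slot the producer will write */
    };

    /* Mirrors the pvrdma_reset_qp() fragments: if the ring exists,
     * drop both indices to zero so the ring reads as empty. */
    static void ring_reset(struct ring_state *ring)
    {
        if (ring) {
            atomic_store(&ring->cons_head, 0);
            atomic_store(&ring->prod_tail, 0);
        }
    }
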
132 struct pvrdma_qp *qp) in pvrdma_set_rq_size() argument
140 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr)); in pvrdma_set_rq_size()
141 qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge)); in pvrdma_set_rq_size()
144 req_cap->max_recv_wr = qp->rq.wqe_cnt; in pvrdma_set_rq_size()
145 req_cap->max_recv_sge = qp->rq.max_sg; in pvrdma_set_rq_size()
147 qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) + in pvrdma_set_rq_size()
149 qp->rq.max_sg); in pvrdma_set_rq_size()
150 qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) / in pvrdma_set_rq_size()
157 struct pvrdma_qp *qp) in pvrdma_set_sq_size() argument
165 qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr)); in pvrdma_set_sq_size()
166 qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge)); in pvrdma_set_sq_size()
169 req_cap->max_send_wr = qp->sq.wqe_cnt; in pvrdma_set_sq_size()
170 req_cap->max_send_sge = qp->sq.max_sg; in pvrdma_set_sq_size()
172 qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) + in pvrdma_set_sq_size()
174 qp->sq.max_sg); in pvrdma_set_sq_size()
176 qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES + in pvrdma_set_sq_size()
177 (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) / in pvrdma_set_sq_size()
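
pvrdma_set_rq_size() and pvrdma_set_sq_size() follow the same recipe: round the requested WR and SGE counts up to powers of two (with a floor of 1), report the rounded values back through req_cap, derive a power-of-two WQE size from the header plus per-SGE storage, and convert the ring footprint into whole pages. A self-contained sketch of that arithmetic; PAGE_SIZE and the header/SGE sizes are stand-in parameters here:

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    /* Portable stand-in for the kernel's roundup_pow_of_two(). */
    static uint32_t roundup_pow2(uint32_t n)
    {
        uint32_t p = 1;
        while (p < n)
            p <<= 1;
        return p;
    }

    /* Mirrors the RQ sizing fragments: wqe_cnt and max_sg are rounded
     * up, wqe_size covers a header plus max_sg scatter/gather entries,
     * and npages is the ring footprint rounded up to whole pages. */
    static void set_rq_size(uint32_t max_recv_wr, uint32_t max_recv_sge,
                            size_t hdr_size, size_t sge_size,
                            uint32_t *wqe_cnt, uint32_t *wqe_size,
                            uint32_t *npages)
    {
        uint32_t max_sg;

        *wqe_cnt  = roundup_pow2(max_recv_wr ? max_recv_wr : 1);
        max_sg    = roundup_pow2(max_recv_sge ? max_recv_sge : 1);
        *wqe_size = roundup_pow2((uint32_t)(hdr_size + sge_size * max_sg));
        *npages   = (*wqe_cnt * *wqe_size + PAGE_SIZE - 1) / PAGE_SIZE;
    }

The SQ variant differs only in that npages_send additionally reserves PVRDMA_QP_NUM_HEADER_PAGES in front of the ring (line 176), which is why qp->sq.offset starts after those header pages (line 315).
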
194 struct pvrdma_qp *qp = to_vqp(ibqp); in pvrdma_create_qp() local
242 spin_lock_init(&qp->sq.lock); in pvrdma_create_qp()
243 spin_lock_init(&qp->rq.lock); in pvrdma_create_qp()
244 mutex_init(&qp->mutex); in pvrdma_create_qp()
245 refcount_set(&qp->refcnt, 1); in pvrdma_create_qp()
246 init_completion(&qp->free); in pvrdma_create_qp()
248 qp->state = IB_QPS_RESET; in pvrdma_create_qp()
249 qp->is_kernel = !udata; in pvrdma_create_qp()
251 if (!qp->is_kernel) { in pvrdma_create_qp()
260 /* Userspace supports qpn and qp handles? */ in pvrdma_create_qp()
270 /* set qp->sq.wqe_cnt, shift, buf_size.. */ in pvrdma_create_qp()
271 qp->rumem = ib_umem_get(ibqp->device, in pvrdma_create_qp()
274 if (IS_ERR(qp->rumem)) { in pvrdma_create_qp()
275 ret = PTR_ERR(qp->rumem); in pvrdma_create_qp()
278 qp->srq = NULL; in pvrdma_create_qp()
280 qp->rumem = NULL; in pvrdma_create_qp()
281 qp->srq = to_vsrq(init_attr->srq); in pvrdma_create_qp()
284 qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr, in pvrdma_create_qp()
286 if (IS_ERR(qp->sumem)) { in pvrdma_create_qp()
288 ib_umem_release(qp->rumem); in pvrdma_create_qp()
289 ret = PTR_ERR(qp->sumem); in pvrdma_create_qp()
293 qp->npages_send = in pvrdma_create_qp()
294 ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE); in pvrdma_create_qp()
296 qp->npages_recv = ib_umem_num_dma_blocks( in pvrdma_create_qp()
297 qp->rumem, PAGE_SIZE); in pvrdma_create_qp()
299 qp->npages_recv = 0; in pvrdma_create_qp()
300 qp->npages = qp->npages_send + qp->npages_recv; in pvrdma_create_qp()
303 &init_attr->cap, qp); in pvrdma_create_qp()
308 &init_attr->cap, qp); in pvrdma_create_qp()
312 qp->npages = qp->npages_send + qp->npages_recv; in pvrdma_create_qp()
315 qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE; in pvrdma_create_qp()
318 qp->rq.offset = qp->npages_send * PAGE_SIZE; in pvrdma_create_qp()
321 if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) { in pvrdma_create_qp()
328 ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages, in pvrdma_create_qp()
329 qp->is_kernel); in pvrdma_create_qp()
336 if (!qp->is_kernel) { in pvrdma_create_qp()
337 pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0); in pvrdma_create_qp()
339 pvrdma_page_dir_insert_umem(&qp->pdir, in pvrdma_create_qp()
340 qp->rumem, in pvrdma_create_qp()
341 qp->npages_send); in pvrdma_create_qp()
344 qp->sq.ring = qp->pdir.pages[0]; in pvrdma_create_qp()
345 qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1]; in pvrdma_create_qp()
375 cmd->total_chunks = qp->npages; in pvrdma_create_qp()
376 cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES; in pvrdma_create_qp()
377 cmd->pdir_dma = qp->pdir.dir_dma; in pvrdma_create_qp()
391 qp->port = init_attr->port_num; in pvrdma_create_qp()
394 qp->ibqp.qp_num = resp_v2->qpn; in pvrdma_create_qp()
395 qp->qp_handle = resp_v2->qp_handle; in pvrdma_create_qp()
397 qp->ibqp.qp_num = resp->qpn; in pvrdma_create_qp()
398 qp->qp_handle = resp->qpn; in pvrdma_create_qp()
402 dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp; in pvrdma_create_qp()
406 qp_resp.qpn = qp->ibqp.qp_num; in pvrdma_create_qp()
407 qp_resp.qp_handle = qp->qp_handle; in pvrdma_create_qp()
413 __pvrdma_destroy_qp(dev, qp); in pvrdma_create_qp()
421 pvrdma_page_dir_cleanup(dev, &qp->pdir); in pvrdma_create_qp()
423 ib_umem_release(qp->rumem); in pvrdma_create_qp()
424 ib_umem_release(qp->sumem); in pvrdma_create_qp()
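
Taken together, the pvrdma_create_qp() matches trace the setup sequence: initialize the locks, mutex, refcount and completion; for userspace QPs, pin the send/receive buffers with ib_umem_get() and count their DMA blocks, while kernel QPs size their own rings; allocate the page directory; issue the create command; publish the handle in dev->qp_tbl; and copy the qpn back to userspace. The failure lines (413, 421-424) show the kernel's goto-based unwinding, which frees exactly the resources acquired before the failure. A compact, self-contained illustration of that idiom (the resources and names are invented for the example):

    #include <stdlib.h>

    struct res { int dummy; };

    static struct res *acquire(void)   { return malloc(sizeof(struct res)); }
    static void release(struct res *r) { free(r); }
    static int device_cmd(void)        { return 0; } /* pretend it can fail */

    static int create(void)
    {
        struct res *a, *b;
        int ret;

        a = acquire();
        if (!a)
            return -1;

        b = acquire();
        if (!b) {
            ret = -1;
            goto err_free_a;
        }

        ret = device_cmd();
        if (ret < 0)
            goto err_free_b;

        return 0;

        /* Unwind labels run in reverse acquisition order and fall
         * through, so each exit releases only what already exists. */
    err_free_b:
        release(b);
    err_free_a:
        release(a);
        return ret;
    }
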
430 static void _pvrdma_free_qp(struct pvrdma_qp *qp) in _pvrdma_free_qp() argument
433 struct pvrdma_dev *dev = to_vdev(qp->ibqp.device); in _pvrdma_free_qp()
436 dev->qp_tbl[qp->qp_handle] = NULL; in _pvrdma_free_qp()
439 if (refcount_dec_and_test(&qp->refcnt)) in _pvrdma_free_qp()
440 complete(&qp->free); in _pvrdma_free_qp()
441 wait_for_completion(&qp->free); in _pvrdma_free_qp()
443 ib_umem_release(qp->rumem); in _pvrdma_free_qp()
444 ib_umem_release(qp->sumem); in _pvrdma_free_qp()
446 pvrdma_page_dir_cleanup(dev, &qp->pdir); in _pvrdma_free_qp()
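
_pvrdma_free_qp() shows the refcount-plus-completion teardown: remove the handle from the table, drop the destroyer's own reference, and block on the completion until the last holder fires it; only then are the umems and page directory released. A rough user-space analogue, assuming C11 <threads.h>, with a mutex/condvar pair standing in for struct completion:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <threads.h>

    struct obj {
        atomic_int refcnt;
        mtx_t lock;
        cnd_t freed;
        bool done;          /* completion state */
    };

    /* Whoever drops the last reference signals the completion. */
    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
            mtx_lock(&o->lock);
            o->done = true;
            cnd_signal(&o->freed);
            mtx_unlock(&o->lock);
        }
    }

    /* The destroyer drops its own reference, then waits until the
     * last holder has signalled -- only then is it safe to tear down
     * the backing memory, as in _pvrdma_free_qp(). */
    static void obj_destroy(struct obj *o)
    {
        obj_put(o);
        mtx_lock(&o->lock);
        while (!o->done)
            cnd_wait(&o->freed, &o->lock);
        mtx_unlock(&o->lock);
        /* release rings, page directory, etc. here */
    }
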
451 static void pvrdma_free_qp(struct pvrdma_qp *qp) in pvrdma_free_qp() argument
458 get_cqs(qp, &scq, &rcq); in pvrdma_free_qp()
461 _pvrdma_flush_cqe(qp, scq); in pvrdma_free_qp()
463 _pvrdma_flush_cqe(qp, rcq); in pvrdma_free_qp()
466 * We're now unlocking the CQs before clearing out the qp handle; this in pvrdma_free_qp()
467 * should still be safe. We have destroyed the backend QP and flushed in pvrdma_free_qp()
468 * the CQEs so there should be no other completions for this QP. in pvrdma_free_qp()
472 _pvrdma_free_qp(qp); in pvrdma_free_qp()
494 * @qp: the queue pair to destroy
499 int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in pvrdma_destroy_qp() argument
501 struct pvrdma_qp *vqp = to_vqp(qp); in pvrdma_destroy_qp()
503 _pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle); in pvrdma_destroy_qp()
510 struct pvrdma_qp *qp) in __pvrdma_destroy_qp() argument
512 _pvrdma_destroy_qp_work(dev, qp->qp_handle); in __pvrdma_destroy_qp()
513 _pvrdma_free_qp(qp); in __pvrdma_destroy_qp()
529 struct pvrdma_qp *qp = to_vqp(ibqp); in pvrdma_modify_qp() local
540 mutex_lock(&qp->mutex); in pvrdma_modify_qp()
542 qp->state; in pvrdma_modify_qp()
574 qp->qkey = attr->qkey; in pvrdma_modify_qp()
581 qp->state = next_state; in pvrdma_modify_qp()
584 cmd->qp_handle = qp->qp_handle; in pvrdma_modify_qp()
626 pvrdma_reset_qp(qp); in pvrdma_modify_qp()
629 mutex_unlock(&qp->mutex); in pvrdma_modify_qp()
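
The pvrdma_modify_qp() matches outline the transition path: everything happens under qp->mutex, the current state comes either from the caller's attr_mask or from the cached qp->state (line 542), the new state is cached and a modify command built with qp->qp_handle, and a transition back to RESET additionally calls pvrdma_reset_qp() to flush and rewind the rings. A skeleton of that flow, with the device command elided and the types invented for the sketch:

    #include <threads.h>

    enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS };

    struct qp {
        mtx_t mutex;
        enum qp_state state;
    };

    /* Stub: would rewind the ring indices and flush CQEs, as in
     * pvrdma_reset_qp() above. */
    static void reset_rings(struct qp *qp) { (void)qp; }

    static int modify(struct qp *qp, enum qp_state next)
    {
        int ret = 0;

        mtx_lock(&qp->mutex);
        /* ... validate the qp->state -> next transition, cache the
         * new state, and send the modify command ... */
        qp->state = next;
        if (next == QPS_RESET)
            reset_rings(qp);
        mtx_unlock(&qp->mutex);
        return ret;
    }
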
634 static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n) in get_sq_wqe() argument
636 return pvrdma_page_dir_get_ptr(&qp->pdir, in get_sq_wqe()
637 qp->sq.offset + n * qp->sq.wqe_size); in get_sq_wqe()
640 static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n) in get_rq_wqe() argument
642 return pvrdma_page_dir_get_ptr(&qp->pdir, in get_rq_wqe()
643 qp->rq.offset + n * qp->rq.wqe_size); in get_rq_wqe()
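
get_sq_wqe()/get_rq_wqe() turn a slot index into a byte offset (the ring's base offset plus n * wqe_size) and hand it to the page directory. Assuming the page directory maps a logically contiguous buffer onto an array of per-page pointers, the lookup reduces to a divide and a modulo, sketched here:

    #include <stddef.h>

    #define PAGE_SIZE 4096u

    struct page_dir {
        void **pages;       /* one mapped pointer per page */
        size_t npages;
    };

    /* Assumed behaviour of pvrdma_page_dir_get_ptr(): resolve a byte
     * offset in the logically contiguous QP buffer to a pointer
     * inside the right page. */
    static void *page_dir_get_ptr(const struct page_dir *pdir, size_t off)
    {
        return (char *)pdir->pages[off / PAGE_SIZE] + off % PAGE_SIZE;
    }

    /* get_sq_wqe()/get_rq_wqe() equivalent: slot n lives at
     * ring_offset + n * wqe_size.  With a page-aligned ring and a
     * power-of-two wqe_size no larger than a page, a WQE never
     * straddles a page boundary. */
    static void *get_wqe(const struct page_dir *pdir, size_t ring_offset,
                         unsigned int n, size_t wqe_size)
    {
        return page_dir_get_ptr(pdir, ring_offset + (size_t)n * wqe_size);
    }
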
664 * pvrdma_post_send - post send work request entries on a QP
665 * @ibqp: the QP
674 struct pvrdma_qp *qp = to_vqp(ibqp); in pvrdma_post_send() local
685 if (qp->state < IB_QPS_RTS) { in pvrdma_post_send()
690 spin_lock_irqsave(&qp->sq.lock, flags); in pvrdma_post_send()
696 qp->sq.ring, qp->sq.wqe_cnt, &tail))) { in pvrdma_post_send()
704 if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) { in pvrdma_post_send()
730 if (qp->ibqp.qp_type != IB_QPT_UD && in pvrdma_post_send()
731 qp->ibqp.qp_type != IB_QPT_RC && in pvrdma_post_send()
738 } else if (qp->ibqp.qp_type == IB_QPT_UD || in pvrdma_post_send()
739 qp->ibqp.qp_type == IB_QPT_GSI) { in pvrdma_post_send()
750 wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail); in pvrdma_post_send()
766 switch (qp->ibqp.qp_type) { in pvrdma_post_send()
778 * Use qkey from qp context if high order bit set, in pvrdma_post_send()
784 qp->qkey : ud_wr(wr)->remote_qkey; in pvrdma_post_send()
848 pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, in pvrdma_post_send()
849 qp->sq.wqe_cnt); in pvrdma_post_send()
857 spin_unlock_irqrestore(&qp->sq.lock, flags); in pvrdma_post_send()
860 pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle); in pvrdma_post_send()
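
pvrdma_post_send() is a classic producer path: fail unless the QP has reached at least RTS, then under sq.lock claim the tail slot if the ring has space, validate num_sge against sq.max_sg, build the WQE header and SGEs in place, bump prod_tail, and after dropping the lock write the UAR doorbell once for the whole batch. A stripped-down, single-threaded sketch of that loop; the ring helpers are simplified stand-ins for pvrdma_idx_ring_has_space()/pvrdma_idx_ring_inc(), and the locking is omitted:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RING_CNT 64u    /* power-of-two slot count, as in the driver */

    struct wqe { unsigned long wr_id; /* + opcode, SGEs, ... */ };

    struct ring {
        atomic_uint cons_head;  /* advanced by the consumer (device) */
        atomic_uint prod_tail;  /* advanced by this producer */
        struct wqe slots[RING_CNT];
    };

    /* Free-running indices: the ring is full when the tail has lapped
     * the head by the slot count. */
    static bool ring_claim(struct ring *r, unsigned int *slot)
    {
        unsigned int head = atomic_load(&r->cons_head);
        unsigned int tail = atomic_load(&r->prod_tail);

        if (tail - head >= RING_CNT)
            return false;
        *slot = tail % RING_CNT;
        return true;
    }

    /* One claim+fill+publish per request, one doorbell at the end --
     * the shape of pvrdma_post_send(). */
    static int post(struct ring *r, const struct wqe *wrs, int n,
                    void (*ring_doorbell)(void))
    {
        int i, ret = 0;

        for (i = 0; i < n; i++) {
            unsigned int slot;

            if (!ring_claim(r, &slot)) {
                ret = -1;   /* -ENOMEM in the driver */
                break;
            }
            r->slots[slot] = wrs[i];            /* build the WQE in place */
            atomic_fetch_add(&r->prod_tail, 1); /* publish the slot */
        }
        if (!ret)
            ring_doorbell();    /* single UAR write for the batch */
        return ret;
    }
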
866 * pvrdma_post_recv - post receive work request entries on a QP
867 * @ibqp: the QP
878 struct pvrdma_qp *qp = to_vqp(ibqp); in pvrdma_post_recv() local
888 if (qp->state == IB_QPS_RESET) { in pvrdma_post_recv()
893 if (qp->srq) { in pvrdma_post_recv()
894 dev_warn(&dev->pdev->dev, "QP associated with SRQ\n"); in pvrdma_post_recv()
899 spin_lock_irqsave(&qp->rq.lock, flags); in pvrdma_post_recv()
904 if (unlikely(wr->num_sge > qp->rq.max_sg || in pvrdma_post_recv()
914 qp->rq.ring, qp->rq.wqe_cnt, &tail))) { in pvrdma_post_recv()
922 wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail); in pvrdma_post_recv()
939 pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail, in pvrdma_post_recv()
940 qp->rq.wqe_cnt); in pvrdma_post_recv()
945 spin_unlock_irqrestore(&qp->rq.lock, flags); in pvrdma_post_recv()
947 pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle); in pvrdma_post_recv()
952 spin_unlock_irqrestore(&qp->rq.lock, flags); in pvrdma_post_recv()
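
pvrdma_post_recv() mirrors the send side (rq.lock, space check, SGE bound, prod_tail bump, PVRDMA_UAR_QP_RECV doorbell) but adds two guards visible above: posting while the QP is in RESET fails, and a QP built on an SRQ rejects receive WRs outright, since those belong on the shared receive queue. In sketch form, with the state/srq fields assumed:

    #include <errno.h>

    enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS };

    struct qp {
        enum qp_state state;
        void *srq;      /* non-NULL when receives go to an SRQ */
    };

    /* The two early-outs from pvrdma_post_recv(): no receives while
     * the QP is in RESET, and none at all when an SRQ owns them. */
    static int check_recv_allowed(const struct qp *qp)
    {
        if (qp->state == QPS_RESET)
            return -EINVAL;
        if (qp->srq)
            return -EINVAL;
        return 0;
    }
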
970 struct pvrdma_qp *qp = to_vqp(ibqp); in pvrdma_query_qp() local
977 mutex_lock(&qp->mutex); in pvrdma_query_qp()
979 if (qp->state == IB_QPS_RESET) { in pvrdma_query_qp()
986 cmd->qp_handle = qp->qp_handle; in pvrdma_query_qp()
1025 qp->state = attr->qp_state; in pvrdma_query_qp()
1032 init_attr->event_handler = qp->ibqp.event_handler; in pvrdma_query_qp()
1033 init_attr->qp_context = qp->ibqp.qp_context; in pvrdma_query_qp()
1034 init_attr->send_cq = qp->ibqp.send_cq; in pvrdma_query_qp()
1035 init_attr->recv_cq = qp->ibqp.recv_cq; in pvrdma_query_qp()
1036 init_attr->srq = qp->ibqp.srq; in pvrdma_query_qp()
1040 init_attr->qp_type = qp->ibqp.qp_type; in pvrdma_query_qp()
1042 init_attr->port_num = qp->port; in pvrdma_query_qp()
1044 mutex_unlock(&qp->mutex); in pvrdma_query_qp()
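
pvrdma_query_qp() takes the same per-QP mutex and short-circuits while the QP is still in RESET: nothing exists at the device yet, so only the cached state is reported; otherwise a query command is issued with qp->qp_handle, the response is copied into attr, the cached qp->state is refreshed from it, and init_attr is filled from the ibqp fields. Roughly, with the device command elided and minimal stand-in types:

    #include <threads.h>

    enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS };

    struct attrs { enum qp_state qp_state; /* + caps, addressing, ... */ };

    struct qp {
        mtx_t mutex;
        enum qp_state state;
        unsigned int qp_handle;
    };

    static int query(struct qp *qp, struct attrs *attr)
    {
        int ret = 0;

        mtx_lock(&qp->mutex);
        if (qp->state == QPS_RESET) {
            /* nothing at the device yet; report the cached state */
            attr->qp_state = qp->state;
            goto out;
        }
        /* ... send the query with qp->qp_handle, fill *attr from the
         * response, then cache attr->qp_state back into qp->state ... */
    out:
        mtx_unlock(&qp->mutex);
        return ret;
    }
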