Lines Matching +full:wr +full:- +full:setup
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
55 struct adapter *sc = rdev->adap; in destroy_cq()
61 struct wrqe *wr; in destroy_cq() local
64 wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]); in destroy_cq()
65 if (wr == NULL) in destroy_cq()
67 res_wr = wrtod(wr); in destroy_cq()
69 res_wr->op_nres = cpu_to_be32( in destroy_cq()
73 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); in destroy_cq()
74 res_wr->cookie = (unsigned long) &wr_wait; in destroy_cq()
75 res = res_wr->res; in destroy_cq()
76 res->u.cq.restype = FW_RI_RES_TYPE_CQ; in destroy_cq()
77 res->u.cq.op = FW_RI_RES_OP_RESET; in destroy_cq()
78 res->u.cq.iqid = cpu_to_be32(cq->cqid); in destroy_cq()
82 t4_wrq_tx(sc, wr); in destroy_cq()
86 kfree(cq->sw_queue); in destroy_cq()
87 dma_free_coherent(rhp->ibdev.dma_device, in destroy_cq()
88 cq->memsize, cq->queue, in destroy_cq()
90 c4iw_put_cqid(rdev, cq->cqid, uctx); in destroy_cq()
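
The work request above is measured in 16-byte units: len16_pkd carries DIV_ROUND_UP(wr_len, 16), and the cookie field stashes a pointer to a wait object that the reply path presumably uses to wake the caller once the firmware acknowledges the RESET. A minimal stand-alone illustration of the length arithmetic; the wr_len value is a stand-in, since the real one comes from the fw_ri_res_wr structures in the firmware headers.

    #include <stdio.h>

    /* Same rounding the driver uses for len16_pkd (stand-in macro). */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Stand-in for sizeof(struct fw_ri_res_wr) + the resource entries;
         * the real value comes from the firmware interface headers. */
        size_t wr_len = 40;

        /* Firmware work requests are sized in 16-byte units. */
        printf("wr_len=%zu -> len16=%zu\n", wr_len, (size_t)DIV_ROUND_UP(wr_len, 16));
        return 0;
    }
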
98 struct adapter *sc = rdev->adap; in create_cq()
103 int user = (uctx != &rdev->uctx); in create_cq()
106 struct wrqe *wr; in create_cq() local
110 return -EIO; in create_cq()
111 cq->cqid = c4iw_get_cqid(rdev, uctx); in create_cq()
112 if (!cq->cqid) { in create_cq()
113 ret = -ENOMEM; in create_cq()
118 cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL); in create_cq()
119 if (!cq->sw_queue) { in create_cq()
120 ret = -ENOMEM; in create_cq()
124 cq->queue = dma_alloc_coherent(rhp->ibdev.dma_device, cq->memsize, in create_cq()
125 &cq->dma_addr, GFP_KERNEL); in create_cq()
126 if (!cq->queue) { in create_cq()
127 ret = -ENOMEM; in create_cq()
130 dma_unmap_addr_set(cq, mapping, cq->dma_addr); in create_cq()
131 memset(cq->queue, 0, cq->memsize); in create_cq()
136 wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]); in create_cq()
137 if (wr == NULL) in create_cq()
139 res_wr = wrtod(wr); in create_cq()
142 res_wr->op_nres = cpu_to_be32( in create_cq()
146 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); in create_cq()
147 res_wr->cookie = (unsigned long) &wr_wait; in create_cq()
148 res = res_wr->res; in create_cq()
149 res->u.cq.restype = FW_RI_RES_TYPE_CQ; in create_cq()
150 res->u.cq.op = FW_RI_RES_OP_WRITE; in create_cq()
151 res->u.cq.iqid = cpu_to_be32(cq->cqid); in create_cq()
153 res->u.cq.iqandst_to_iqandstindex = cpu_to_be32( in create_cq()
157 V_FW_RI_RES_WR_IQANDSTINDEX(sc->sge.ofld_rxq[0].iq.abs_id)); in create_cq()
158 res->u.cq.iqdroprss_to_iqesize = cpu_to_be16( in create_cq()
164 res->u.cq.iqsize = cpu_to_be16(cq->size); in create_cq()
165 res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr); in create_cq()
169 t4_wrq_tx(sc, wr); in create_cq()
176 cq->gen = 1; in create_cq()
177 cq->rdev = rdev; in create_cq()
180 t4_bar2_sge_qregs(rdev->adap, cq->cqid, T4_BAR2_QTYPE_INGRESS, user, in create_cq()
181 &cq_bar2_qoffset, &cq->bar2_qid); in create_cq()
183 /* If user mapping then compute the page-aligned physical in create_cq()
187 cq->bar2_pa = (rdev->bar2_pa + cq_bar2_qoffset) & PAGE_MASK; in create_cq()
189 cq->bar2_va = (void __iomem *)((u64)rdev->bar2_kva + in create_cq()
194 dma_free_coherent(rhp->ibdev.dma_device, cq->memsize, cq->queue, in create_cq()
197 kfree(cq->sw_queue); in create_cq()
199 c4iw_put_cqid(rdev, cq->cqid, uctx); in create_cq()
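
create_cq() acquires its resources in a fixed order (CQID, software shadow queue, DMA-coherent ring, then the firmware WRITE work request), and the error lines above free them in reverse. A stripped-down user-space sketch of that unwind shape; the toy_* helpers and label names are invented for illustration and only model the ordering, not the driver's API.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_cq {
        int    cqid;
        void  *sw_queue;    /* host-only shadow queue (kzalloc in the driver)   */
        void  *queue;       /* stands in for the DMA-coherent hardware ring     */
        size_t memsize;
    };

    /* Hypothetical id allocator standing in for c4iw_get_cqid()/c4iw_put_cqid(). */
    static int  toy_get_cqid(void) { static int next = 1; return next++; }
    static void toy_put_cqid(int id) { (void)id; }

    /* Stands in for building the FW_RI_RES_WR, posting it and awaiting the reply. */
    static int toy_post_fw_write(struct toy_cq *cq) { (void)cq; return 0; }

    static int toy_create_cq(struct toy_cq *cq, size_t memsize)
    {
        int ret;

        cq->memsize = memsize;
        cq->cqid = toy_get_cqid();
        if (!cq->cqid)
            return -ENOMEM;

        cq->sw_queue = calloc(1, memsize);
        if (!cq->sw_queue) {
            ret = -ENOMEM;
            goto err_put_cqid;
        }

        cq->queue = calloc(1, memsize);       /* dma_alloc_coherent() in the driver */
        if (!cq->queue) {
            ret = -ENOMEM;
            goto err_free_sw;
        }

        ret = toy_post_fw_write(cq);
        if (ret)
            goto err_free_queue;
        return 0;

    err_free_queue:
        free(cq->queue);
    err_free_sw:
        free(cq->sw_queue);
    err_put_cqid:
        toy_put_cqid(cq->cqid);
        return ret;
    }

    int main(void)
    {
        struct toy_cq cq;
        printf("create: %d\n", toy_create_cq(&cq, 4096));
        return 0;
    }
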
209 cq, cq->sw_cidx, cq->sw_pidx); in insert_recv_cqe()
215 V_CQE_QPID(wq->sq.qid)); in insert_recv_cqe()
216 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); in insert_recv_cqe()
217 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
224 int in_use = wq->rq.in_use - count; in c4iw_flush_rq()
228 __func__, wq, cq, wq->rq.in_use, count); in c4iw_flush_rq()
229 while (in_use--) { in c4iw_flush_rq()
242 cq, cq->sw_cidx, cq->sw_pidx); in insert_sq_cqe()
245 V_CQE_OPCODE(swcqe->opcode) | in insert_sq_cqe()
248 V_CQE_QPID(wq->sq.qid)); in insert_sq_cqe()
249 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; in insert_sq_cqe()
250 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); in insert_sq_cqe()
251 cq->sw_queue[cq->sw_pidx] = cqe; in insert_sq_cqe()
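
insert_recv_cqe() and insert_sq_cqe() synthesize completions in software for work requests the hardware will never report: a CQE carrying the QP id and the current generation bit is written at sw_pidx of the software CQ, and c4iw_flush_rq() repeats this once per outstanding RQ entry. A toy user-space model of that producer step; the field layout and wrap handling are simplified and are not the driver's t4_cqe/t4_swcq_produce().

    #include <stdint.h>
    #include <stdio.h>

    #define SWCQ_SIZE 8

    struct toy_cqe  { uint32_t qpid; int flushed; int gen; };
    struct toy_swcq {
        struct toy_cqe queue[SWCQ_SIZE];
        int pidx, cidx, gen;
    };

    /* Mirrors the shape of insert_recv_cqe(): build a flush CQE and produce it. */
    static void toy_insert_flush_cqe(struct toy_swcq *cq, uint32_t qpid)
    {
        struct toy_cqe cqe = { .qpid = qpid, .flushed = 1, .gen = cq->gen };

        cq->queue[cq->pidx] = cqe;
        if (++cq->pidx == SWCQ_SIZE)
            cq->pidx = 0;        /* toy wrap; the driver's producer helper differs */
    }

    int main(void)
    {
        struct toy_swcq cq = { .gen = 1 };
        int in_use = 3;          /* outstanding RQ entries, as in c4iw_flush_rq() */

        while (in_use--)
            toy_insert_flush_cqe(&cq, 42);
        printf("sw_pidx now %d\n", cq.pidx);
        return 0;
    }
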
260 struct t4_wq *wq = &qhp->wq; in c4iw_flush_sq()
261 struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq); in c4iw_flush_sq()
262 struct t4_cq *cq = &chp->cq; in c4iw_flush_sq()
266 if (wq->sq.flush_cidx == -1) in c4iw_flush_sq()
267 wq->sq.flush_cidx = wq->sq.cidx; in c4iw_flush_sq()
268 idx = wq->sq.flush_cidx; in c4iw_flush_sq()
269 BUG_ON(idx >= wq->sq.size); in c4iw_flush_sq()
270 while (idx != wq->sq.pidx) { in c4iw_flush_sq()
271 swsqe = &wq->sq.sw_sq[idx]; in c4iw_flush_sq()
272 BUG_ON(swsqe->flushed); in c4iw_flush_sq()
273 swsqe->flushed = 1; in c4iw_flush_sq()
275 if (wq->sq.oldest_read == swsqe) { in c4iw_flush_sq()
276 BUG_ON(swsqe->opcode != FW_RI_READ_REQ); in c4iw_flush_sq()
280 if (++idx == wq->sq.size) in c4iw_flush_sq()
283 wq->sq.flush_cidx += flushed; in c4iw_flush_sq()
284 if (wq->sq.flush_cidx >= wq->sq.size) in c4iw_flush_sq()
285 wq->sq.flush_cidx -= wq->sq.size; in c4iw_flush_sq()
294 if (wq->sq.flush_cidx == -1) in flush_completed_wrs()
295 wq->sq.flush_cidx = wq->sq.cidx; in flush_completed_wrs()
296 cidx = wq->sq.flush_cidx; in flush_completed_wrs()
297 BUG_ON(cidx > wq->sq.size); in flush_completed_wrs()
299 while (cidx != wq->sq.pidx) { in flush_completed_wrs()
300 swsqe = &wq->sq.sw_sq[cidx]; in flush_completed_wrs()
301 if (!swsqe->signaled) { in flush_completed_wrs()
302 if (++cidx == wq->sq.size) in flush_completed_wrs()
304 } else if (swsqe->complete) { in flush_completed_wrs()
306 BUG_ON(swsqe->flushed); in flush_completed_wrs()
313 __func__, cidx, cq->sw_pidx); in flush_completed_wrs()
314 swsqe->cqe.header |= htonl(V_CQE_SWCQE(1)); in flush_completed_wrs()
315 cq->sw_queue[cq->sw_pidx] = swsqe->cqe; in flush_completed_wrs()
317 swsqe->flushed = 1; in flush_completed_wrs()
318 if (++cidx == wq->sq.size) in flush_completed_wrs()
320 wq->sq.flush_cidx = cidx; in flush_completed_wrs()
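
flush_completed_wrs() walks the software SQ from flush_cidx toward pidx: unsignaled entries are skipped, entries whose completion already arrived (swsqe->complete) have their saved CQE published to the software CQ, and the walk stops at the first signaled entry that is still pending, which keeps reported completions in order. A small self-contained model of that rule (no ring wrap, simplified flags):

    #include <stdio.h>

    struct toy_swsqe { int signaled, complete, flushed; };

    /* Returns how many entries were pushed to the software CQ.
     * A simplified model of flush_completed_wrs(); indices do not wrap here. */
    static int toy_flush_completed(struct toy_swsqe *sq, int cidx, int pidx)
    {
        int pushed = 0;

        while (cidx != pidx) {
            if (!sq[cidx].signaled) {
                cidx++;                  /* unsignaled: nothing to report */
            } else if (sq[cidx].complete) {
                sq[cidx].flushed = 1;    /* completed out of order: publish it now */
                pushed++;
                cidx++;
            } else {
                break;                   /* signaled but still pending: stop here */
            }
        }
        return pushed;
    }

    int main(void)
    {
        struct toy_swsqe sq[] = {
            { 0, 0, 0 },   /* unsignaled, skipped               */
            { 1, 1, 0 },   /* completed, published to the SW CQ */
            { 1, 0, 0 },   /* still pending, walk stops here    */
            { 1, 1, 0 },   /* not reached                       */
        };
        printf("published %d\n", toy_flush_completed(sq, 0, 4));
        return 0;
    }
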
329 read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx; in create_read_req_cqe()
330 read_cqe->len = htonl(wq->sq.oldest_read->read_len); in create_read_req_cqe()
331 read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) | in create_read_req_cqe()
335 read_cqe->bits_type_ts = hw_cqe->bits_type_ts; in create_read_req_cqe()
341 u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1; in advance_oldest_read()
343 if (rptr == wq->sq.size) in advance_oldest_read()
345 while (rptr != wq->sq.pidx) { in advance_oldest_read()
346 wq->sq.oldest_read = &wq->sq.sw_sq[rptr]; in advance_oldest_read()
348 if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ) in advance_oldest_read()
350 if (++rptr == wq->sq.size) in advance_oldest_read()
353 wq->sq.oldest_read = NULL; in advance_oldest_read()
358 * Deal with out-of-order and/or completions that complete
368 CTR3(KTR_IW_CXGBE, "%s cq %p cqid 0x%x", __func__, &chp->cq, in c4iw_flush_hw_cq()
369 chp->cq.cqid); in c4iw_flush_hw_cq()
370 ret = t4_next_hw_cqe(&chp->cq, &hw_cqe); in c4iw_flush_hw_cq()
378 qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe)); in c4iw_flush_hw_cq()
406 if (!qhp->wq.sq.oldest_read->signaled) { in c4iw_flush_hw_cq()
407 advance_oldest_read(&qhp->wq); in c4iw_flush_hw_cq()
415 create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe); in c4iw_flush_hw_cq()
417 advance_oldest_read(&qhp->wq); in c4iw_flush_hw_cq()
421 * unsignaled and now in-order completions into the swcq. in c4iw_flush_hw_cq()
424 swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; in c4iw_flush_hw_cq()
425 swsqe->cqe = *hw_cqe; in c4iw_flush_hw_cq()
426 swsqe->complete = 1; in c4iw_flush_hw_cq()
427 flush_completed_wrs(&qhp->wq, &chp->cq); in c4iw_flush_hw_cq()
429 swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx]; in c4iw_flush_hw_cq()
431 swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1)); in c4iw_flush_hw_cq()
432 t4_swcq_produce(&chp->cq); in c4iw_flush_hw_cq()
435 t4_hwcq_consume(&chp->cq); in c4iw_flush_hw_cq()
436 ret = t4_next_hw_cqe(&chp->cq, &hw_cqe); in c4iw_flush_hw_cq()
463 ptr = cq->sw_cidx; in c4iw_count_rcqes()
464 while (ptr != cq->sw_pidx) { in c4iw_count_rcqes()
465 cqe = &cq->sw_queue[ptr]; in c4iw_count_rcqes()
467 (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq)) in c4iw_count_rcqes()
469 if (++ptr == cq->size) in c4iw_count_rcqes()
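
c4iw_count_rcqes() walks the software CQ from sw_cidx to sw_pidx, counting receive CQEs that belong to the given QP and still complete a posted WR. A minimal ring-walk model using the same wrap step; the matching predicate here is simplified.

    #include <stdint.h>
    #include <stdio.h>

    struct toy_cqe { uint32_t qpid; int is_recv; };

    /* Count entries for one QP between cidx and pidx on a ring of 'size' slots. */
    static int toy_count(const struct toy_cqe *q, int size, int cidx, int pidx,
                         uint32_t qpid)
    {
        int n = 0, ptr = cidx;

        while (ptr != pidx) {
            if (q[ptr].is_recv && q[ptr].qpid == qpid)
                n++;
            if (++ptr == size)       /* same wrap step as c4iw_count_rcqes() */
                ptr = 0;
        }
        return n;
    }

    int main(void)
    {
        struct toy_cqe q[4] = { {7, 1}, {9, 1}, {7, 0}, {7, 1} };
        printf("%d\n", toy_count(q, 4, 3, 2, 7));   /* walks 3 -> 0 -> 1, counts 2 */
        return 0;
    }
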
488 * -EAGAIN CQE skipped, try again.
489 * -EOVERFLOW CQ overflow detected.
516 ret = -EAGAIN; in poll_cq()
523 if (wq->flushed && !SW_CQE(hw_cqe)) { in poll_cq()
524 ret = -EAGAIN; in poll_cq()
532 ret = -EAGAIN; in poll_cq()
537 * Special cqe for drain WR completions... in poll_cq()
547 * 1) the cqe doesn't contain the sq_wptr from the wr. in poll_cq()
548 * 2) opcode not reflected from the wr. in poll_cq()
549 * 3) read_len not reflected from the wr. in poll_cq()
561 ret = -EAGAIN; in poll_cq()
566 * was generated by the kernel driver as part of peer-2-peer in poll_cq()
567 * connection setup. So ignore the completion. in poll_cq()
572 ret = -EAGAIN; in poll_cq()
579 if (!wq->sq.oldest_read->signaled) { in poll_cq()
581 ret = -EAGAIN; in poll_cq()
613 ret = -EAGAIN; in poll_cq()
616 if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { in poll_cq()
618 hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN)); in poll_cq()
629 * now in-order completions into the SW CQ. This handles in poll_cq()
632 * signaled WR is completed. in poll_cq()
635 if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) { in poll_cq()
641 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; in poll_cq()
642 swsqe->cqe = *hw_cqe; in poll_cq()
643 swsqe->complete = 1; in poll_cq()
644 ret = -EAGAIN; in poll_cq()
652 * Reap the associated WR(s) that are freed up with this in poll_cq()
657 BUG_ON(idx >= wq->sq.size); in poll_cq()
667 if (idx < wq->sq.cidx) in poll_cq()
668 wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx; in poll_cq()
670 wq->sq.in_use -= idx - wq->sq.cidx; in poll_cq()
671 BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size); in poll_cq()
673 wq->sq.cidx = (uint16_t)idx; in poll_cq()
675 __func__, wq->sq.cidx); in poll_cq()
676 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id; in poll_cq()
680 __func__, wq->rq.cidx); in poll_cq()
681 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id; in poll_cq()
689 * Flush any completed cqes that are now in-order. in poll_cq()
696 __func__, cq, cq->cqid, cq->sw_cidx); in poll_cq()
700 __func__, cq, cq->cqid, cq->cidx); in poll_cq()
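
The consumer-index update near the end of poll_cq() handles ring wrap: when the completed slot idx is numerically below cidx the SQ has wrapped, so the number of retired slots is size + idx - cidx; otherwise it is idx - cidx. A tiny worked example of both cases:

    #include <stdio.h>

    /* Number of SQ slots retired when the completion points at slot idx and the
     * consumer index is cidx, on a ring of 'size' slots. Mirrors the wrap
     * handling in poll_cq(). */
    static int slots_retired(int size, int cidx, int idx)
    {
        return (idx < cidx) ? size + idx - cidx : idx - cidx;
    }

    int main(void)
    {
        /* No wrap: cidx=10, completion at idx=14 -> 4 slots retired. */
        printf("%d\n", slots_retired(128, 10, 14));
        /* Wrapped: cidx=120, completion at idx=5 -> 128 + 5 - 120 = 13. */
        printf("%d\n", slots_retired(128, 120, 5));
        return 0;
    }
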
711 * -ENODATA EMPTY;
712 * -EAGAIN caller must try again
713 * any other -errno fatal error
725 ret = t4_next_cqe(&chp->cq, &rd_cqe); in c4iw_poll_cq_one()
730 qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe)); in c4iw_poll_cq_one()
734 spin_lock(&qhp->lock); in c4iw_poll_cq_one()
735 wq = &(qhp->wq); in c4iw_poll_cq_one()
737 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit); in c4iw_poll_cq_one()
741 wc->wr_id = cookie; in c4iw_poll_cq_one()
742 wc->qp = &qhp->ibqp; in c4iw_poll_cq_one()
743 wc->vendor_err = CQE_STATUS(&cqe); in c4iw_poll_cq_one()
744 wc->wc_flags = 0; in c4iw_poll_cq_one()
755 wc->byte_len = CQE_LEN(&cqe); in c4iw_poll_cq_one()
757 wc->byte_len = 0; in c4iw_poll_cq_one()
758 wc->opcode = IB_WC_RECV; in c4iw_poll_cq_one()
761 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); in c4iw_poll_cq_one()
762 wc->wc_flags |= IB_WC_WITH_INVALIDATE; in c4iw_poll_cq_one()
763 c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); in c4iw_poll_cq_one()
768 wc->opcode = IB_WC_RDMA_WRITE; in c4iw_poll_cq_one()
771 wc->opcode = IB_WC_RDMA_READ; in c4iw_poll_cq_one()
772 wc->byte_len = CQE_LEN(&cqe); in c4iw_poll_cq_one()
776 wc->opcode = IB_WC_SEND; in c4iw_poll_cq_one()
777 wc->wc_flags |= IB_WC_WITH_INVALIDATE; in c4iw_poll_cq_one()
781 wc->opcode = IB_WC_SEND; in c4iw_poll_cq_one()
784 wc->opcode = IB_WC_LOCAL_INV; in c4iw_poll_cq_one()
787 wc->opcode = IB_WC_REG_MR; in c4iw_poll_cq_one()
791 c4iw_invalidate_mr(qhp->rhp, in c4iw_poll_cq_one()
795 wc->opcode = IB_WC_SEND; in c4iw_poll_cq_one()
801 ret = -EINVAL; in c4iw_poll_cq_one()
807 wc->status = IB_WC_WR_FLUSH_ERR; in c4iw_poll_cq_one()
812 wc->status = IB_WC_SUCCESS; in c4iw_poll_cq_one()
815 wc->status = IB_WC_LOC_ACCESS_ERR; in c4iw_poll_cq_one()
818 wc->status = IB_WC_LOC_PROT_ERR; in c4iw_poll_cq_one()
822 wc->status = IB_WC_LOC_ACCESS_ERR; in c4iw_poll_cq_one()
825 wc->status = IB_WC_GENERAL_ERR; in c4iw_poll_cq_one()
828 wc->status = IB_WC_LOC_LEN_ERR; in c4iw_poll_cq_one()
832 wc->status = IB_WC_MW_BIND_ERR; in c4iw_poll_cq_one()
848 wc->status = IB_WC_FATAL_ERR; in c4iw_poll_cq_one()
851 wc->status = IB_WC_WR_FLUSH_ERR; in c4iw_poll_cq_one()
856 wc->status = IB_WC_FATAL_ERR; in c4iw_poll_cq_one()
861 spin_unlock(&qhp->lock); in c4iw_poll_cq_one()
874 spin_lock_irqsave(&chp->lock, flags); in c4iw_poll_cq()
878 } while (err == -EAGAIN); in c4iw_poll_cq()
882 spin_unlock_irqrestore(&chp->lock, flags); in c4iw_poll_cq()
883 return !err || err == -ENODATA ? npolled : err; in c4iw_poll_cq()
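
c4iw_poll_cq() calls c4iw_poll_cq_one() up to num_entries times, retrying whenever it returns -EAGAIN (a CQE that had to be skipped) and stopping at -ENODATA (empty); the final expression reports the number of completions unless a real error occurred. A compact stand-alone model of that return convention; toy_poll_one() is a made-up stand-in for the per-CQE poller.

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical one-shot poller: 0 means a completion was produced,
     * -EAGAIN means "skip and retry", -ENODATA means the queue is empty. */
    static int toy_poll_one(int *budget)
    {
        if (*budget == 2) { (*budget)--; return -EAGAIN; }  /* simulate a skipped CQE */
        if (*budget == 0) return -ENODATA;
        (*budget)--;
        return 0;
    }

    static int toy_poll(int num_entries)
    {
        int npolled, err = 0, budget = 3;

        for (npolled = 0; npolled < num_entries; npolled++) {
            do {
                err = toy_poll_one(&budget);
            } while (err == -EAGAIN);                       /* same retry as c4iw_poll_cq() */
            if (err)
                break;
        }
        /* Same idiom as the driver: an empty CQ is not an error to the caller. */
        return !err || err == -ENODATA ? npolled : err;
    }

    int main(void)
    {
        printf("polled %d\n", toy_poll(8));
        return 0;
    }
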
894 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid); in c4iw_destroy_cq()
895 atomic_dec(&chp->refcnt); in c4iw_destroy_cq()
896 wait_event(chp->wait, !atomic_read(&chp->refcnt)); in c4iw_destroy_cq()
900 destroy_cq(&chp->rhp->rdev, &chp->cq, in c4iw_destroy_cq()
901 ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx); in c4iw_destroy_cq()
907 struct ib_device *ibdev = ibcq->device; in c4iw_create_cq()
908 int entries = attr->cqe; in c4iw_create_cq()
909 int vector = attr->comp_vector; in c4iw_create_cq()
919 if (attr->flags) in c4iw_create_cq()
920 return -EINVAL; in c4iw_create_cq()
941 hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size); in c4iw_create_cq()
950 memsize = hwentries * sizeof *chp->cq.queue; in c4iw_create_cq()
957 chp->cq.size = hwentries; in c4iw_create_cq()
958 chp->cq.memsize = memsize; in c4iw_create_cq()
959 chp->cq.vector = vector; in c4iw_create_cq()
961 ret = create_cq(&rhp->rdev, &chp->cq, in c4iw_create_cq()
962 ucontext ? &ucontext->uctx : &rhp->rdev.uctx); in c4iw_create_cq()
966 chp->rhp = rhp; in c4iw_create_cq()
967 chp->cq.size--; /* status page */ in c4iw_create_cq()
968 chp->ibcq.cqe = entries - 2; in c4iw_create_cq()
969 spin_lock_init(&chp->lock); in c4iw_create_cq()
970 spin_lock_init(&chp->comp_handler_lock); in c4iw_create_cq()
971 atomic_set(&chp->refcnt, 1); in c4iw_create_cq()
972 init_waitqueue_head(&chp->wait); in c4iw_create_cq()
973 ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); in c4iw_create_cq()
978 ret = -ENOMEM; in c4iw_create_cq()
987 uresp.qid_mask = rhp->rdev.cqmask; in c4iw_create_cq()
988 uresp.cqid = chp->cq.cqid; in c4iw_create_cq()
989 uresp.size = chp->cq.size; in c4iw_create_cq()
990 uresp.memsize = chp->cq.memsize; in c4iw_create_cq()
991 spin_lock(&ucontext->mmap_lock); in c4iw_create_cq()
992 uresp.key = ucontext->key; in c4iw_create_cq()
993 ucontext->key += PAGE_SIZE; in c4iw_create_cq()
994 uresp.gts_key = ucontext->key; in c4iw_create_cq()
995 ucontext->key += PAGE_SIZE; in c4iw_create_cq()
996 spin_unlock(&ucontext->mmap_lock); in c4iw_create_cq()
998 sizeof(uresp) - sizeof(uresp.reserved)); in c4iw_create_cq()
1002 mm->key = uresp.key; in c4iw_create_cq()
1003 mm->addr = vtophys(chp->cq.queue); in c4iw_create_cq()
1004 mm->len = chp->cq.memsize; in c4iw_create_cq()
1007 mm2->key = uresp.gts_key; in c4iw_create_cq()
1008 mm2->addr = chp->cq.bar2_pa; in c4iw_create_cq()
1009 mm2->len = PAGE_SIZE; in c4iw_create_cq()
1014 __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize, in c4iw_create_cq()
1015 (unsigned long long) chp->cq.dma_addr); in c4iw_create_cq()
1022 remove_handle(rhp, &rhp->cqidr, chp->cq.cqid); in c4iw_create_cq()
1024 destroy_cq(&chp->rhp->rdev, &chp->cq, in c4iw_create_cq()
1025 ucontext ? &ucontext->uctx : &rhp->rdev.uctx); in c4iw_create_cq()
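
The sizing lines in c4iw_create_cq() double the requested depth and clamp it to the adapter's maximum ingress-queue size, compute the ring's byte size from that, later reserve one slot for the status page, and report entries - 2 back through ibcq.cqe. A worked example with an assumed per-CQE size; the driver actually uses sizeof(*chp->cq.queue), and rounds the byte size up elsewhere.

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        int entries  = 256;     /* depth requested by the verbs consumer            */
        int iq_max   = 65536;   /* stand-in for t4_max_iq_size                      */
        int cqe_size = 64;      /* assumed CQE size; really sizeof(struct t4_cqe)   */

        int  hwentries = MIN(entries * 2, iq_max);    /* as in c4iw_create_cq()     */
        long memsize   = (long)hwentries * cqe_size;  /* ring bytes before rounding */

        printf("hwentries=%d memsize=%ld reported cqe=%d\n",
               hwentries, memsize, entries - 2);
        return 0;
    }
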
1032 return -ENOSYS; in c4iw_resize_cq()
1042 if (__predict_false(c4iw_stopped(chp->cq.rdev))) in c4iw_arm_cq()
1043 return -EIO; in c4iw_arm_cq()
1044 spin_lock_irqsave(&chp->lock, flag); in c4iw_arm_cq()
1045 t4_arm_cq(&chp->cq, in c4iw_arm_cq()
1048 ret = t4_cq_notempty(&chp->cq); in c4iw_arm_cq()
1049 spin_unlock_irqrestore(&chp->lock, flag); in c4iw_arm_cq()
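
c4iw_arm_cq() arms the CQ under the lock and, via t4_cq_notempty(), can report that completions are already queued; a consumer using IB_CQ_REPORT_MISSED_EVENTS treats a positive return as "poll again before sleeping", which closes the race between the last poll and the arm. A stand-alone sketch of that consumer pattern with placeholder helpers, not the kernel verbs API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder CQ with a pending-completion counter. */
    struct toy_cq { int pending; bool armed; };

    static int toy_poll(struct toy_cq *cq)
    {
        if (!cq->pending)
            return 0;
        cq->pending--;
        return 1;                   /* one completion reaped */
    }

    /* Arm for the next event; returns >0 if completions are already queued,
     * mirroring the t4_cq_notempty() hint in c4iw_arm_cq(). */
    static int toy_arm(struct toy_cq *cq)
    {
        cq->armed = true;
        return cq->pending > 0;
    }

    int main(void)
    {
        struct toy_cq cq = { .pending = 2 };

        while (toy_poll(&cq))       /* drain what we can see */
            ;
        cq.pending = 1;             /* a completion slips in just before the arm */
        if (toy_arm(&cq) > 0) {
            while (toy_poll(&cq))   /* re-poll instead of waiting for an event */
                ;
        }
        printf("drained, pending=%d\n", cq.pending);
        return 0;
    }
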