Lines matching the query +full:no +full:- +full:memory +full:- +full:wc ("no-memory-wc"), evidently from the Chelsio cxgb4 iWARP driver's CQ code (cq.c). Only the matching lines are shown, grouped by the function they appear in; "..." marks gaps where non-matching lines were omitted.

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 * ...
 * - Redistributions of source code must retain the above
 * ...
 * - Redistributions in binary form must reproduce the above
 * ...
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * ...
 */
In destroy_cq():

    res_wr->op_nres = cpu_to_be32(
    ...
    res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
    res_wr->cookie = (uintptr_t)wr_waitp;
    res = res_wr->res;
    res->u.cq.restype = FW_RI_RES_TYPE_CQ;
    res->u.cq.op = FW_RI_RES_OP_RESET;
    res->u.cq.iqid = cpu_to_be32(cq->cqid);
    ...
    kfree(cq->sw_queue);
    dma_free_coherent(&(rdev->lldi.pdev->dev),
                      cq->memsize, cq->queue,
    ...
    c4iw_put_cqid(rdev, cq->cqid, uctx);
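Both destroy_cq() and create_cq() size the firmware work request in 16-byte units via DIV_ROUND_UP(wr_len, 16). A minimal userspace sketch of that rounding, with DIV_ROUND_UP reproduced as defined in the kernel's math headers:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* A 40-byte work request occupies 3 16-byte units, not 2. */
    assert(DIV_ROUND_UP(40, 16) == 3);
    /* An exact multiple does not round up. */
    assert(DIV_ROUND_UP(64, 16) == 4);
    return 0;
}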
In create_cq():

    int user = (uctx != &rdev->uctx);
    ...
    cq->cqid = c4iw_get_cqid(rdev, uctx);
    if (!cq->cqid) {
        ret = -ENOMEM;
    ...
    cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
    if (!cq->sw_queue) {
        ret = -ENOMEM;
    ...
    cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                   &cq->dma_addr, GFP_KERNEL);
    if (!cq->queue) {
        ret = -ENOMEM;
    ...
    dma_unmap_addr_set(cq, mapping, cq->dma_addr);
    ...
    if (user && ucontext->is_32b_cqe) {
        cq->qp_errp = &((struct t4_status_page *)
                        ((u8 *)cq->queue + (cq->size - 1) *
                         (sizeof(*cq->queue) / 2)))->qp_err;
    ...
        cq->qp_errp = &((struct t4_status_page *)
                        ((u8 *)cq->queue + (cq->size - 1) *
                         sizeof(*cq->queue)))->qp_err;
    ...
        ret = -ENOMEM;
    ...
    res_wr->op_nres = cpu_to_be32(
    ...
    res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
    res_wr->cookie = (uintptr_t)wr_waitp;
    res = res_wr->res;
    res->u.cq.restype = FW_RI_RES_TYPE_CQ;
    res->u.cq.op = FW_RI_RES_OP_WRITE;
    res->u.cq.iqid = cpu_to_be32(cq->cqid);
    res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
    ...
            rdev->lldi.ciq_ids[cq->vector]));
    res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
    ...
        ((user && ucontext->is_32b_cqe) ?
    ...
    res->u.cq.iqsize = cpu_to_be16(cq->size);
    res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
    ...
    cq->gen = 1;
    cq->gts = rdev->lldi.gts_reg;
    cq->rdev = rdev;
    ...
    cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
                                  &cq->bar2_qid,
                                  user ? &cq->bar2_pa : NULL);
    if (user && !cq->bar2_pa) {
    ...
            pci_name(rdev->lldi.pdev), cq->cqid);
        ret = -EINVAL;
    ...
    dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
    ...
    kfree(cq->sw_queue);
    ...
    c4iw_put_cqid(rdev, cq->cqid, uctx);
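create_cq() places a t4_status_page in the last slot of the CQ ring, so cq->qp_errp is computed as (size - 1) slots past the ring base; with 32-byte CQEs (is_32b_cqe) each slot is half the size of the default 64-byte CQE, which halves the offset. A self-contained sketch of that placement; the structure names here are stand-ins, not the driver's real t4_cqe/t4_status_page layouts:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_cqe { uint8_t raw[64]; };        /* 64-byte CQE slot */
struct fake_status_page { uint8_t qp_err; }; /* only the field used above */

static uint8_t *qp_errp(void *queue, size_t size, int is_32b_cqe)
{
    size_t slot = is_32b_cqe ? sizeof(struct fake_cqe) / 2
                             : sizeof(struct fake_cqe);
    struct fake_status_page *sp =
        (struct fake_status_page *)((uint8_t *)queue + (size - 1) * slot);

    return &sp->qp_err;
}

int main(void)
{
    size_t size = 8;
    void *q = calloc(size, sizeof(struct fake_cqe));

    /* 64b slots: offset 448; 32b slots: offset 224 for an 8-deep ring */
    printf("64b offset: %zu\n", (size_t)(qp_errp(q, size, 0) - (uint8_t *)q));
    printf("32b offset: %zu\n", (size_t)(qp_errp(q, size, 1) - (uint8_t *)q));
    free(q);
    return 0;
}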
In insert_recv_cqe():

             wq, cq, cq->sw_cidx, cq->sw_pidx);
    ...
                        CQE_QPID_V(wq->sq.qid));
    cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
    ...
    cq->sw_queue[cq->sw_pidx] = cqe;
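insert_recv_cqe() (and insert_sq_cqe() below) stamp CQE_GENBIT_V(cq->gen) into each software-built CQE. The generation bit is how a consumer tells freshly written entries from stale ones: it flips every time the ring wraps. A simplified, self-contained model of that ownership scheme, not the real t4_cq layout:

#include <stdio.h>

#define QSIZE 4

struct entry { unsigned gen; int data; };

struct ring {
    struct entry q[QSIZE];
    unsigned pidx, cidx;
    unsigned pgen, cgen;    /* producer/consumer generation, start at 1 */
};

static void produce(struct ring *r, int data)
{
    r->q[r->pidx].data = data;
    r->q[r->pidx].gen = r->pgen;    /* stamp the current generation */
    if (++r->pidx == QSIZE) {
        r->pidx = 0;
        r->pgen ^= 1;               /* wrap: flip the ownership bit */
    }
}

static int consume(struct ring *r, int *data)
{
    struct entry *e = &r->q[r->cidx];

    if (e->gen != r->cgen)
        return 0;                   /* slot not (re)written yet: empty */
    *data = e->data;
    if (++r->cidx == QSIZE) {
        r->cidx = 0;
        r->cgen ^= 1;
    }
    return 1;
}

int main(void)
{
    struct ring r = { .pgen = 1, .cgen = 1 };
    int v, i;

    for (i = 0; i < 6; i++) {       /* crosses one wrap of the ring */
        produce(&r, i);
        while (consume(&r, &v))
            printf("got %d\n", v);
    }
    return 0;
}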
In c4iw_flush_rq():

    int in_use = wq->rq.in_use - count;
    ...
             wq, cq, wq->rq.in_use, count);
    while (in_use--) {
In insert_sq_cqe():

             wq, cq, cq->sw_cidx, cq->sw_pidx);
    ...
                        CQE_OPCODE_V(swcqe->opcode) |
    ...
                        CQE_QPID_V(wq->sq.qid));
    CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
    cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
    cq->sw_queue[cq->sw_pidx] = cqe;
In c4iw_flush_sq():

    struct t4_wq *wq = &qhp->wq;
    struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
    struct t4_cq *cq = &chp->cq;
    ...
    if (wq->sq.flush_cidx == -1)
        wq->sq.flush_cidx = wq->sq.cidx;
    idx = wq->sq.flush_cidx;
    while (idx != wq->sq.pidx) {
        swsqe = &wq->sq.sw_sq[idx];
        swsqe->flushed = 1;
        ...
        if (wq->sq.oldest_read == swsqe) {
        ...
        if (++idx == wq->sq.size)
    ...
    wq->sq.flush_cidx += flushed;
    if (wq->sq.flush_cidx >= wq->sq.size)
        wq->sq.flush_cidx -= wq->sq.size;
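The flush_cidx update above advances a ring index by a count and wraps by subtraction rather than with '%', the usual pattern when the advance can never exceed one full ring. A tiny sketch of the same arithmetic:

#include <assert.h>

static unsigned ring_advance(unsigned idx, unsigned n, unsigned size)
{
    idx += n;
    if (idx >= size)    /* n <= size, so one subtraction suffices */
        idx -= size;
    return idx;
}

int main(void)
{
    assert(ring_advance(5, 4, 8) == 1);    /* wraps past the end */
    assert(ring_advance(2, 3, 8) == 5);    /* no wrap */
    return 0;
}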
In flush_completed_wrs():

    if (wq->sq.flush_cidx == -1)
        wq->sq.flush_cidx = wq->sq.cidx;
    cidx = wq->sq.flush_cidx;
    ...
    while (cidx != wq->sq.pidx) {
        swsqe = &wq->sq.sw_sq[cidx];
        if (!swsqe->signaled) {
            if (++cidx == wq->sq.size)
        ...
        } else if (swsqe->complete) {
        ...
                     cidx, cq->sw_pidx);
            swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
            cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
            ...
            swsqe->flushed = 1;
            if (++cidx == wq->sq.size)
            ...
            wq->sq.flush_cidx = cidx;
In create_read_req_cqe():

    read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
    read_cqe->len = htonl(wq->sq.oldest_read->read_len);
    read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
    ...
    read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
In advance_oldest_read():

    u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
    ...
    if (rptr == wq->sq.size)
    ...
    while (rptr != wq->sq.pidx) {
        wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
        ...
        if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
        ...
        if (++rptr == wq->sq.size)
    ...
    wq->sq.oldest_read = NULL;
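advance_oldest_read() recovers an array index from an element pointer (oldest_read - sw_sq), then scans forward with wrap for the next read request, giving up at the producer index. A sketch of the same shape under simplified types (next_read() and is_read are stand-ins, not driver names):

#include <assert.h>

struct swsqe { int is_read; };

static struct swsqe *next_read(struct swsqe *sw_sq, unsigned size,
                               struct swsqe *oldest, unsigned pidx)
{
    unsigned rptr = (unsigned)(oldest - sw_sq) + 1; /* index from pointer */

    if (rptr == size)
        rptr = 0;
    while (rptr != pidx) {
        if (sw_sq[rptr].is_read)
            return &sw_sq[rptr];
        if (++rptr == size)
            rptr = 0;
    }
    return 0;    /* no further outstanding read request */
}

int main(void)
{
    struct swsqe sq[8] = { 0 };

    sq[6].is_read = 1;
    /* oldest read was slot 3; the scan visits 4, 5, 6 and finds slot 6 */
    assert(next_read(sq, 8, &sq[3], 7) == &sq[6]);
    return 0;
}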
/*
 * ... Deal with out-of-order and/or completions that complete ...
 */

In c4iw_flush_hw_cq():

    pr_debug("cqid 0x%x\n", chp->cq.cqid);
    ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
    ...
        qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));
        ...
        /* ... drop CQEs with no associated QP ... */
        ...
        spin_lock(&qhp->lock);
        ...
        if (qhp->wq.flushed == 1)
        ...
            if (!qhp->wq.sq.oldest_read->signaled) {
                advance_oldest_read(&qhp->wq);
            ...
            /* ... in local memory and move it into the swcq. ... */
            ...
            create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
            ...
            advance_oldest_read(&qhp->wq);
            ...
            /* ... unsignaled and now in-order completions into the swcq. ... */
            ...
            swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
            swsqe->cqe = *hw_cqe;
            swsqe->complete = 1;
            flush_completed_wrs(&qhp->wq, &chp->cq);
        ...
            swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
            ...
            swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
            t4_swcq_produce(&chp->cq);
        ...
        t4_hwcq_consume(&chp->cq);
        ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
    ...
    spin_unlock(&qhp->lock);
In cqe_completes_wr():

    WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
In c4iw_count_rcqes():

    ptr = cq->sw_cidx;
    while (ptr != cq->sw_pidx) {
        cqe = &cq->sw_queue[ptr];
        ...
            (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
        ...
        if (++ptr == cq->size)
In post_pending_srq_wrs():

    while (srq->pending_in_use) {
        pwr = &srq->pending_wrs[srq->pending_cidx];
        srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;
        srq->sw_rq[srq->pidx].valid = 1;
        ...
                 srq->cidx, srq->pidx, srq->wq_pidx,
                 srq->in_use, srq->size,
                 (unsigned long long)pwr->wr_id);
        ...
        c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16);
        ...
        t4_srq_produce(srq, pwr->len16);
        idx += DIV_ROUND_UP(pwr->len16 * 16, T4_EQ_ENTRY_SIZE);
    ...
    t4_ring_srq_db(srq, idx, pwr->len16, &pwr->wqe);
    srq->queue[srq->size].status.host_wq_pidx =
        srq->wq_pidx;
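post_pending_srq_wrs() accumulates the doorbell count in hardware EQ entries: a WR of len16 16-byte units occupies DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE) entries. A sketch of that conversion, assuming the usual 64-byte EQ entry size (EQ_ENTRY_SIZE below is a stand-in for T4_EQ_ENTRY_SIZE):

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define EQ_ENTRY_SIZE 64    /* assumed value of T4_EQ_ENTRY_SIZE */

int main(void)
{
    /* len16 = 3 -> 48 bytes -> one 64-byte EQ entry */
    assert(DIV_ROUND_UP(3 * 16, EQ_ENTRY_SIZE) == 1);
    /* len16 = 5 -> 80 bytes -> two EQ entries */
    assert(DIV_ROUND_UP(5 * 16, EQ_ENTRY_SIZE) == 2);
    return 0;
}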
In reap_srq_cqe():

    int rel_idx = CQE_ABS_RQE_IDX(hw_cqe) - srq->rqt_abs_idx;
    ...
    srq->sw_rq[rel_idx].valid = 0;
    wr_id = srq->sw_rq[rel_idx].wr_id;
    ...
    if (rel_idx == srq->cidx) {
        ...
                 __func__, rel_idx, srq->cidx, srq->pidx,
                 srq->wq_pidx, srq->in_use, srq->size,
                 (unsigned long long)srq->sw_rq[rel_idx].wr_id);
        ...
        while (srq->ooo_count && !srq->sw_rq[srq->cidx].valid) {
            ...
                     __func__, srq->cidx, srq->pidx,
                     srq->wq_pidx, srq->in_use,
                     srq->size, srq->ooo_count,
                     ...
                     srq->sw_rq[srq->cidx].wr_id);
            ...
        if (srq->ooo_count == 0 && srq->pending_in_use)
        ...
                 __func__, rel_idx, srq->cidx,
                 srq->pidx, srq->wq_pidx,
                 srq->in_use, srq->size,
                 srq->ooo_count,
                 (unsigned long long)srq->sw_rq[rel_idx].wr_id);
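reap_srq_cqe() handles out-of-order SRQ completions: any slot can complete, so each carries a valid flag, and the consumer index only advances when the head slot itself is reaped, at which point it also swallows any already-reaped holes now contiguous at the head (tracked by ooo_count). A simplified model of that bookkeeping, my reconstruction rather than the driver's exact logic:

#include <stdio.h>

#define QSIZE 8

struct srq_model {
    int valid[QSIZE];
    unsigned cidx, ooo_count;
};

static void reap(struct srq_model *s, unsigned idx)
{
    s->valid[idx] = 0;
    if (idx != s->cidx) {
        s->ooo_count++;    /* completed ahead of the head */
        return;
    }
    /* head reaped: consume it, then drain holes now at the head */
    if (++s->cidx == QSIZE)
        s->cidx = 0;
    while (s->ooo_count && !s->valid[s->cidx]) {
        s->ooo_count--;
        if (++s->cidx == QSIZE)
            s->cidx = 0;
    }
}

int main(void)
{
    struct srq_model s = { .valid = { 1, 1, 1, 1 } };

    reap(&s, 2);    /* out of order: only marks slot 2 invalid */
    printf("cidx=%u ooo=%u\n", s.cidx, s.ooo_count);    /* 0, 1 */
    reap(&s, 0);    /* head: advances cidx past slot 0 only */
    printf("cidx=%u ooo=%u\n", s.cidx, s.ooo_count);    /* 1, 1 */
    reap(&s, 1);    /* head again: also swallows reaped slot 2 */
    printf("cidx=%u ooo=%u\n", s.cidx, s.ooo_count);    /* 3, 0 */
    return 0;
}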
/*
 * ...
 * -EAGAIN     CQE skipped, try again.
 * -EOVERFLOW  CQ overflow detected.
 * ...
 */
In poll_cq():

        ret = -EAGAIN;
    ...
    if (wq->flushed && !SW_CQE(hw_cqe)) {
        ret = -EAGAIN;
    ...
        ret = -EAGAIN;
    ...
        ret = -EAGAIN;
    ...
    /* ... was generated by the kernel driver as part of peer-2-peer ... */
    ...
        ret = -EAGAIN;
    ...
        if (!wq->sq.oldest_read->signaled) {
            ...
            ret = -EAGAIN;
        ...
        /* ... in local memory. ... */
    ...
            CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
        ...
        hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
    ...
    /* ... now in-order completions into the SW CQ. This handles ... */
    ...
    if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
        ...
        swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
        swsqe->cqe = *hw_cqe;
        swsqe->complete = 1;
        ret = -EAGAIN;
    ...
        if (idx < wq->sq.cidx)
            wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
        else
            wq->sq.in_use -= idx - wq->sq.cidx;
        ...
        wq->sq.cidx = (uint16_t)idx;
        pr_debug("completing sq idx %u\n", wq->sq.cidx);
        *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
    ...
        pr_debug("completing rq idx %u\n", wq->rq.cidx);
        *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
        ...
        wq->rq.msn++;
    ...
    /* ... Flush any completed cqes that are now in-order. ... */
    ...
             cq, cq->cqid, cq->sw_cidx);
    ...
             cq, cq->cqid, cq->cidx);
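The in_use adjustment near the end of poll_cq() retires (idx - cidx) SQ entries, adding a full ring's worth of correction when idx has wrapped below cidx. A sketch of exactly that arithmetic:

#include <assert.h>

static unsigned retire(unsigned in_use, unsigned cidx, unsigned idx,
                       unsigned size)
{
    if (idx < cidx)
        in_use -= size + idx - cidx;    /* idx wrapped past the end */
    else
        in_use -= idx - cidx;
    return in_use;
}

int main(void)
{
    assert(retire(5, 6, 1, 8) == 2);    /* wrapped: 3 entries retired */
    assert(retire(5, 2, 5, 8) == 2);    /* no wrap: 3 entries retired */
    return 0;
}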
In __c4iw_poll_cq_one():

                  struct ib_wc *wc, struct c4iw_srq *srq)
    ...
    struct t4_wq *wq = qhp ? &qhp->wq : NULL;
    ...
    ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
                  srq ? &srq->wq : NULL);
    ...
    wc->wr_id = cookie;
    wc->qp = &qhp->ibqp;
    wc->vendor_err = CQE_STATUS(&cqe);
    wc->wc_flags = 0;
    ...
    if (srq && !(srq->flags & T4_SRQ_LIMIT_SUPPORT) && srq->armed &&
        srq->wq.in_use < srq->srq_limit)
    ...
            wc->byte_len = CQE_LEN(&cqe);
        ...
            wc->byte_len = 0;
        ...
            wc->opcode = IB_WC_RECV;
        ...
            wc->opcode = IB_WC_RECV;
            wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
            wc->wc_flags |= IB_WC_WITH_INVALIDATE;
            c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
        ...
            wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
            wc->ex.imm_data = CQE_IMM_DATA(&cqe);
            wc->wc_flags |= IB_WC_WITH_IMM;
        ...
            ret = -EINVAL;
        ...
            wc->opcode = IB_WC_RDMA_WRITE;
        ...
            wc->opcode = IB_WC_RDMA_READ;
            wc->byte_len = CQE_LEN(&cqe);
        ...
            wc->opcode = IB_WC_SEND;
            wc->wc_flags |= IB_WC_WITH_INVALIDATE;
        ...
            wc->opcode = IB_WC_SEND;
        ...
            wc->opcode = IB_WC_LOCAL_INV;
        ...
            wc->opcode = IB_WC_REG_MR;
        ...
                c4iw_invalidate_mr(qhp->rhp,
        ...
            ret = -EINVAL;
    ...
        wc->status = IB_WC_WR_FLUSH_ERR;
        ...
            wc->status = IB_WC_SUCCESS;
        ...
            wc->status = IB_WC_LOC_ACCESS_ERR;
        ...
            wc->status = IB_WC_LOC_PROT_ERR;
        ...
            wc->status = IB_WC_LOC_ACCESS_ERR;
        ...
            wc->status = IB_WC_GENERAL_ERR;
        ...
            wc->status = IB_WC_LOC_LEN_ERR;
        ...
            wc->status = IB_WC_MW_BIND_ERR;
        ...
            wc->status = IB_WC_FATAL_ERR;
        ...
            wc->status = IB_WC_WR_FLUSH_ERR;
        ...
            wc->status = IB_WC_FATAL_ERR;
/*
 * ...
 * -ENODATA         EMPTY;
 * -EAGAIN          caller must try again
 * any other -errno fatal error
 */
In c4iw_poll_cq_one():

static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
    ...
    ret = t4_next_cqe(&chp->cq, &rd_cqe);
    ...
    qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
    ...
        spin_lock(&qhp->lock);
        srq = qhp->srq;
        ...
            spin_lock(&srq->lock);
        ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
        spin_unlock(&qhp->lock);
        ...
            spin_unlock(&srq->lock);
    ...
        ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
In c4iw_poll_cq():

int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
    ...
    spin_lock_irqsave(&chp->lock, flags);
    ...
            err = c4iw_poll_cq_one(chp, wc + npolled);
        } while (err == -EAGAIN);
    ...
    spin_unlock_irqrestore(&chp->lock, flags);
    return !err || err == -ENODATA ? npolled : err;
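The fragments above show the polling contract: c4iw_poll_cq_one() returns 0 on success, -EAGAIN when a CQE was skipped (retry the same slot), and -ENODATA when the CQ is empty, so the outer loop retries -EAGAIN and reports npolled unless a hard error occurred. A userspace sketch of that contract; fake_poll_one() is a hypothetical stand-in for c4iw_poll_cq_one():

#include <errno.h>
#include <stdio.h>

static int fake_poll_one(int *budget)
{
    static int skip_once = 1;

    if (skip_once-- > 0)
        return -EAGAIN;                    /* CQE skipped: try again */
    return (*budget)-- > 0 ? 0 : -ENODATA; /* success until CQ empties */
}

int main(void)
{
    int budget = 3, npolled, err = 0;

    for (npolled = 0; npolled < 8; npolled++) {
        do {
            err = fake_poll_one(&budget);
        } while (err == -EAGAIN);          /* retry skipped CQEs */
        if (err)
            break;                         /* empty or fatal: stop */
    }
    /* empty CQ is not an error: report how many were polled */
    printf("npolled=%d\n", !err || err == -ENODATA ? npolled : err);
    return 0;
}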
In c4iw_cq_rem_ref():

    if (refcount_dec_and_test(&chp->refcnt))
        complete(&chp->cq_rel_comp);
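c4iw_cq_rem_ref() and c4iw_destroy_cq() below use the common refcount-plus-completion teardown pattern: whoever drops the last reference signals the completion, and the destroy path waits on it before freeing. A single-threaded sketch modeled with C11 atomics and a flag in place of the kernel's refcount_t and struct completion:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
    atomic_int refcnt;
    atomic_bool released;    /* stands in for struct completion */
};

static void put(struct obj *o)
{
    /* the last put signals the waiter, as complete() does above */
    if (atomic_fetch_sub(&o->refcnt, 1) == 1)
        atomic_store(&o->released, true);
}

int main(void)
{
    struct obj o = { .refcnt = 2, .released = false };

    put(&o);    /* some other user drops its reference */
    put(&o);    /* destroy path drops the initial reference... */
    while (!atomic_load(&o->released))
        ;       /* ...then waits, like wait_for_completion() */
    puts("object can be freed");
    return 0;
}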
In c4iw_destroy_cq():

    xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
    ...
    wait_for_completion(&chp->cq_rel_comp);
    ...
    destroy_cq(&chp->rhp->rdev, &chp->cq,
               ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
               chp->destroy_skb, chp->wr_waitp);
    c4iw_put_wr_wait(chp->wr_waitp);
In c4iw_create_cq():

    struct ib_udata *udata = &attrs->driver_udata;
    struct ib_device *ibdev = ibcq->device;
    int entries = attr->cqe;
    int vector = attr->comp_vector;
    struct c4iw_dev *rhp = to_c4iw_dev(ibcq->device);
    ...
    if (attr->flags)
        return -EOPNOTSUPP;
    ...
    if (entries < 1 || entries > ibdev->attrs.max_cqe)
        return -EINVAL;
    ...
    if (vector >= rhp->rdev.lldi.nciq)
        return -EINVAL;
    ...
        if (udata->inlen < sizeof(ucmd))
            ucontext->is_32b_cqe = 1;
    ...
    chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
    if (!chp->wr_waitp) {
        ret = -ENOMEM;
    ...
    c4iw_init_wr_wait(chp->wr_waitp);
    ...
    chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
    if (!chp->destroy_skb) {
        ret = -ENOMEM;
    ...
    hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
    ...
    memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
                           (sizeof(*chp->cq.queue) / 2) :
                           sizeof(*chp->cq.queue));
    ...
    chp->cq.size = hwentries;
    chp->cq.memsize = memsize;
    chp->cq.vector = vector;
    ...
    ret = create_cq(&rhp->rdev, &chp->cq,
                    ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                    chp->wr_waitp);
    ...
    chp->rhp = rhp;
    chp->cq.size--;    /* status page */
    chp->ibcq.cqe = entries - 2;
    spin_lock_init(&chp->lock);
    spin_lock_init(&chp->comp_handler_lock);
    refcount_set(&chp->refcnt, 1);
    init_completion(&chp->cq_rel_comp);
    ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
    ...
        ret = -ENOMEM;
    ...
        uresp.qid_mask = rhp->rdev.cqmask;
        uresp.cqid = chp->cq.cqid;
        uresp.size = chp->cq.size;
        uresp.memsize = chp->cq.memsize;
        spin_lock(&ucontext->mmap_lock);
        uresp.key = ucontext->key;
        ucontext->key += PAGE_SIZE;
        uresp.gts_key = ucontext->key;
        ucontext->key += PAGE_SIZE;
        ...
        spin_unlock(&ucontext->mmap_lock);
        ...
                 ucontext->is_32b_cqe ?
                 sizeof(uresp) - sizeof(uresp.flags) :
        ...
        mm->key = uresp.key;
        mm->addr = 0;
        mm->vaddr = chp->cq.queue;
        mm->dma_addr = chp->cq.dma_addr;
        mm->len = chp->cq.memsize;
        insert_flag_to_mmap(&rhp->rdev, mm, mm->addr);
        ...
        mm2->key = uresp.gts_key;
        mm2->addr = chp->cq.bar2_pa;
        mm2->len = PAGE_SIZE;
        mm2->vaddr = NULL;
        mm2->dma_addr = 0;
        insert_flag_to_mmap(&rhp->rdev, mm2, mm2->addr);
    ...
             chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
             &chp->cq.dma_addr);
    ...
    xa_erase_irq(&rhp->cqs, chp->cq.cqid);
    ...
    destroy_cq(&chp->rhp->rdev, &chp->cq,
               ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
               chp->destroy_skb, chp->wr_waitp);
    ...
    kfree_skb(chp->destroy_skb);
    ...
    c4iw_put_wr_wait(chp->wr_waitp);
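The sizing lines in c4iw_create_cq() show the CQ depth arithmetic: the hardware queue is doubled (capped at the adapter's IQ limit), one slot is given up to the status page, and the depth reported back to the user is smaller still (entries - 2). A sketch of that arithmetic with illustrative values; the cap and slot size here are assumptions, not queried limits:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    int entries = 256;             /* user-requested CQ depth */
    int t4_max_iq_size = 65520;    /* illustrative adapter cap */
    int cqe_size = 64;             /* 64-byte CQEs (32 with is_32b_cqe) */

    int hwentries = MIN(entries * 2, t4_max_iq_size);
    int memsize = hwentries * cqe_size;
    int usable = hwentries - 1;    /* last slot holds the status page */

    printf("hwentries=%d memsize=%d usable=%d reported=%d\n",
           hwentries, memsize, usable, entries - 2);
    return 0;
}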
In c4iw_arm_cq():

    spin_lock_irqsave(&chp->lock, flag);
    t4_arm_cq(&chp->cq,
    ...
    ret = t4_cq_notempty(&chp->cq);
    spin_unlock_irqrestore(&chp->lock, flag);
In c4iw_flush_srqidx():

    struct c4iw_cq *rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
    ...
    spin_lock_irqsave(&rchp->lock, flag);
    spin_lock(&qhp->lock);
    ...
    insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx);
    ...
    spin_unlock(&qhp->lock);
    spin_unlock_irqrestore(&rchp->lock, flag);