Lines Matching defs:qp (drivers/infiniband/sw/siw/siw_qp.c, SoftiWARP queue pair handling)
96 struct siw_qp *qp;
105 qp = sk_to_qp(sk);
107 if (likely(!qp->rx_stream.rx_suspend &&
108 down_read_trylock(&qp->state_lock))) {
109 read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 };
111 if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
120 up_read(&qp->state_lock);
122 siw_dbg_qp(qp, "unable to process RX, suspend: %d\n",
123 qp->rx_stream.rx_suspend);
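Lines 96–123 above are from the QP's sk_data_ready socket callback: when the receive path is not suspended and the state lock can be taken, a read descriptor pointing at the QP is built and, while the QP is in RTS, queued TCP payload is handed to the driver's receive actor; otherwise processing is skipped with a debug message. A generic sketch of that callback shape, using the kernel's tcp_read_sock() but with otherwise illustrative names (my_ctx and my_rx_actor are not from the driver):

        #include <net/tcp.h>

        struct my_ctx { int unused; };  /* illustrative per-connection context */

        /*
         * Receive actor: consume up to 'len' bytes of TCP payload for the
         * context stored in rd_desc->arg.data and report how much was taken.
         */
        static int my_rx_actor(read_descriptor_t *rd_desc, struct sk_buff *skb,
                               unsigned int off, size_t len)
        {
                return (int)len;
        }

        static void my_data_ready(struct sock *sk)
        {
                struct my_ctx *ctx = sk->sk_user_data;  /* assumed set at connect time */
                read_descriptor_t rd_desc = { .arg.data = ctx, .count = 1 };

                /*
                 * tcp_read_sock() walks the socket receive queue and calls the
                 * actor for each chunk; it simply returns if nothing is queued.
                 */
                tcp_read_sock(sk, &rd_desc, my_rx_actor);
        }
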
129 void siw_qp_llp_close(struct siw_qp *qp)
131 siw_dbg_qp(qp, "enter llp close, state = %s\n",
132 siw_qp_state_to_string[qp->attrs.state]);
134 down_write(&qp->state_lock);
136 qp->rx_stream.rx_suspend = 1;
137 qp->tx_ctx.tx_suspend = 1;
138 qp->attrs.sk = NULL;
140 switch (qp->attrs.state) {
145 qp->attrs.state = SIW_QP_STATE_ERROR;
154 if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
155 qp->attrs.state = SIW_QP_STATE_ERROR;
157 qp->attrs.state = SIW_QP_STATE_IDLE;
161 siw_dbg_qp(qp, "llp close: no state transition needed: %s\n",
162 siw_qp_state_to_string[qp->attrs.state]);
165 siw_sq_flush(qp);
166 siw_rq_flush(qp);
171 if (qp->cep) {
172 siw_cep_put(qp->cep);
173 qp->cep = NULL;
176 up_write(&qp->state_lock);
178 siw_dbg_qp(qp, "llp close exit: state %s\n",
179 siw_qp_state_to_string[qp->attrs.state]);
197 (void)siw_sq_start(cep->qp);
203 static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
207 qp->irq = vcalloc(irq_size, sizeof(struct siw_sqe));
208 if (!qp->irq) {
209 qp->attrs.irq_size = 0;
215 qp->orq = vcalloc(orq_size, sizeof(struct siw_sqe));
216 if (!qp->orq) {
217 qp->attrs.orq_size = 0;
218 qp->attrs.irq_size = 0;
219 vfree(qp->irq);
223 qp->attrs.irq_size = irq_size;
224 qp->attrs.orq_size = orq_size;
225 siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
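siw_qp_readq_init() (lines 203–225) sizes the inbound (IRQ) and outbound (ORQ) read request queues and records the sizes in qp->attrs. Later lines index those arrays as rings with free-running put/get counters, e.g. qp->irq[qp->irq_get % qp->attrs.irq_size] at line 956 and qp->orq[qp->orq_get % qp->attrs.orq_size] at line 1169, advanced by counters such as qp->orq_put++. A small self-contained sketch of that ring discipline (struct and helper names are illustrative only):

        #include <stdio.h>
        #include <stdlib.h>

        struct slot {
                int valid;              /* slot holds a pending entry */
                int data;
        };

        struct ring {
                struct slot *slots;
                unsigned int size;      /* number of slots */
                unsigned int put;       /* producer counter, wraps via modulo */
                unsigned int get;       /* consumer counter, wraps via modulo */
        };

        /* Return the producer's next slot if free, or NULL if the ring is full. */
        static struct slot *ring_get_free(struct ring *r)
        {
                struct slot *s = &r->slots[r->put % r->size];

                return s->valid ? NULL : s;
        }

        int main(void)
        {
                struct ring r = { calloc(4, sizeof(struct slot)), 4, 0, 0 };
                struct slot *s = ring_get_free(&r);

                if (s) {
                        s->data = 1;
                        s->valid = 1;
                        r.put++;        /* advance producer, as qp->orq_put++ does above */
                }
                printf("next consumer slot: %u\n", r.get % r.size);
                free(r.slots);
                return 0;
        }
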
236 int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
238 struct siw_wqe *wqe = tx_wqe(qp);
242 spin_lock_irqsave(&qp->sq_lock, flags);
245 spin_unlock_irqrestore(&qp->sq_lock, flags);
271 spin_lock(&qp->orq_lock);
273 if (qp->attrs.orq_size)
274 rreq = orq_get_free(qp);
277 qp->orq_put++;
281 spin_unlock(&qp->orq_lock);
288 spin_unlock_irqrestore(&qp->sq_lock, flags);
291 rv = siw_sq_start(qp);
340 void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer, u8 etype,
343 if (!qp->term_info.valid) {
344 memset(&qp->term_info, 0, sizeof(qp->term_info));
345 qp->term_info.layer = layer;
346 qp->term_info.etype = etype;
347 qp->term_info.ecode = ecode;
348 qp->term_info.in_tx = in_tx;
349 qp->term_info.valid = 1;
351 siw_dbg_qp(qp, "init TERM: layer %d, type %d, code %d, in tx %s\n",
363 void siw_send_terminate(struct siw_qp *qp)
369 struct socket *s = qp->attrs.sk;
370 struct siw_rx_stream *srx = &qp->rx_stream;
375 if (!qp->term_info.valid)
378 qp->term_info.valid = 0;
380 if (tx_wqe(qp)->wr_status == SIW_WR_INPROGRESS) {
381 siw_dbg_qp(qp, "cannot send TERMINATE: op %d in progress\n",
382 tx_type(tx_wqe(qp)));
385 if (!s && qp->cep)
387 s = qp->cep->sock;
390 siw_dbg_qp(qp, "cannot send TERMINATE: not connected\n");
405 if ((qp->term_info.layer == TERM_ERROR_LAYER_DDP) ||
406 ((qp->term_info.layer == TERM_ERROR_LAYER_RDMAP) &&
407 (qp->term_info.etype != RDMAP_ETYPE_CATASTROPHIC))) {
417 __rdmap_term_set_layer(term, qp->term_info.layer);
418 __rdmap_term_set_etype(term, qp->term_info.etype);
419 __rdmap_term_set_ecode(term, qp->term_info.ecode);
421 switch (qp->term_info.layer) {
423 if (qp->term_info.etype == RDMAP_ETYPE_CATASTROPHIC)
427 if (qp->term_info.etype == RDMAP_ETYPE_REMOTE_PROTECTION) {
436 if (qp->term_info.in_tx) {
438 struct siw_wqe *wqe = tx_wqe(qp);
488 if ((qp->term_info.ecode == RDMAP_ECODE_VERSION) ||
489 (qp->term_info.ecode == RDMAP_ECODE_OPCODE))
516 if (((qp->term_info.etype == DDP_ETYPE_TAGGED_BUF) &&
517 (qp->term_info.ecode == DDP_ECODE_T_VERSION)) ||
518 ((qp->term_info.etype == DDP_ETYPE_UNTAGGED_BUF) &&
519 (qp->term_info.ecode == DDP_ECODE_UT_VERSION)))
559 if (qp->tx_ctx.mpa_crc_enabled) {
560 siw_crc_init(&qp->tx_ctx.mpa_crc);
561 siw_crc_update(&qp->tx_ctx.mpa_crc,
564 siw_crc_update(&qp->tx_ctx.mpa_crc,
567 siw_crc_final(&qp->tx_ctx.mpa_crc, (u8 *)&crc);
571 siw_dbg_qp(qp, "sent TERM: %s, layer %d, type %d, code %d (%d bytes)\n",
582 static void siw_qp_modify_nonstate(struct siw_qp *qp,
588 qp->attrs.flags |= SIW_RDMA_BIND_ENABLED;
590 qp->attrs.flags &= ~SIW_RDMA_BIND_ENABLED;
593 qp->attrs.flags |= SIW_RDMA_WRITE_ENABLED;
595 qp->attrs.flags &= ~SIW_RDMA_WRITE_ENABLED;
598 qp->attrs.flags |= SIW_RDMA_READ_ENABLED;
600 qp->attrs.flags &= ~SIW_RDMA_READ_ENABLED;
604 static int siw_qp_nextstate_from_idle(struct siw_qp *qp,
613 siw_crc_init(&qp->tx_ctx.mpa_crc);
614 qp->tx_ctx.mpa_crc_enabled = true;
615 siw_crc_init(&qp->rx_stream.mpa_crc);
616 qp->rx_stream.mpa_crc_enabled = true;
619 siw_dbg_qp(qp, "no socket\n");
624 siw_dbg_qp(qp, "no MPA\n");
631 qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 0;
632 qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 0;
633 qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 0;
638 qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 1;
639 qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 1;
640 qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 1;
646 rv = siw_qp_readq_init(qp, attrs->irq_size,
651 qp->attrs.sk = attrs->sk;
652 qp->attrs.state = SIW_QP_STATE_RTS;
654 siw_dbg_qp(qp, "enter RTS: crc=%s, ord=%u, ird=%u\n",
656 qp->attrs.orq_size, qp->attrs.irq_size);
660 siw_rq_flush(qp);
661 qp->attrs.state = SIW_QP_STATE_ERROR;
662 if (qp->cep) {
663 siw_cep_put(qp->cep);
664 qp->cep = NULL;
674 static int siw_qp_nextstate_from_rts(struct siw_qp *qp,
689 if (tx_wqe(qp)->wr_status == SIW_WR_IDLE) {
690 qp->attrs.state = SIW_QP_STATE_CLOSING;
692 qp->attrs.state = SIW_QP_STATE_ERROR;
693 siw_sq_flush(qp);
695 siw_rq_flush(qp);
701 qp->attrs.state = SIW_QP_STATE_TERMINATE;
703 siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
723 siw_sq_flush(qp);
724 siw_rq_flush(qp);
725 qp->attrs.state = SIW_QP_STATE_ERROR;
735 static void siw_qp_nextstate_from_term(struct siw_qp *qp,
740 siw_rq_flush(qp);
741 qp->attrs.state = SIW_QP_STATE_ERROR;
743 if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
744 siw_sq_flush(qp);
752 static int siw_qp_nextstate_from_close(struct siw_qp *qp,
759 WARN_ON(tx_wqe(qp)->wr_status != SIW_WR_IDLE);
760 qp->attrs.state = SIW_QP_STATE_IDLE;
775 qp->attrs.state = SIW_QP_STATE_ERROR;
777 if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
778 siw_sq_flush(qp);
780 siw_rq_flush(qp);
784 siw_dbg_qp(qp, "state transition undefined: %s => %s\n",
785 siw_qp_state_to_string[qp->attrs.state],
794 * Caller must hold qp->state_lock
796 int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attrs,
804 siw_dbg_qp(qp, "state: %s => %s\n",
805 siw_qp_state_to_string[qp->attrs.state],
809 siw_qp_modify_nonstate(qp, attrs, mask);
814 switch (qp->attrs.state) {
817 rv = siw_qp_nextstate_from_idle(qp, attrs, mask);
821 drop_conn = siw_qp_nextstate_from_rts(qp, attrs);
825 siw_qp_nextstate_from_term(qp, attrs);
829 siw_qp_nextstate_from_close(qp, attrs);
835 siw_qp_cm_drop(qp, 0);
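The comment at line 794 states that the caller of siw_qp_modify() must hold qp->state_lock; siw_qp_llp_close() above takes the same rwsem with down_write()/up_write(). A minimal caller sketch under that rule (hypothetical function, assuming the driver-internal siw.h definitions; not taken from the driver):

        /* Hypothetical helper: force a QP into the ERROR state. */
        static int example_move_qp_to_error(struct siw_qp *qp)
        {
                struct siw_qp_attrs qp_attrs;
                int rv;

                memset(&qp_attrs, 0, sizeof(qp_attrs));
                qp_attrs.state = SIW_QP_STATE_ERROR;

                down_write(&qp->state_lock);    /* required by siw_qp_modify() */
                rv = siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);
                up_write(&qp->state_lock);

                return rv;
        }
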
852 static int siw_activate_tx_from_sq(struct siw_qp *qp)
855 struct siw_wqe *wqe = tx_wqe(qp);
858 sqe = sq_get_next(qp);
891 siw_dbg_qp(qp, "cannot fence read\n");
895 spin_lock(&qp->orq_lock);
897 if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
898 qp->tx_ctx.orq_fence = 1;
901 spin_unlock(&qp->orq_lock);
907 if (unlikely(!qp->attrs.orq_size)) {
914 spin_lock(&qp->orq_lock);
916 rreq = orq_get_free(qp);
923 qp->orq_put++;
925 qp->tx_ctx.orq_fence = 1;
928 spin_unlock(&qp->orq_lock);
933 qp->sq_get++;
936 siw_dbg_qp(qp, "error %d\n", rv);
945 * the active IRQ will not be served after qp->irq_burst, if the
948 int siw_activate_tx(struct siw_qp *qp)
951 struct siw_wqe *wqe = tx_wqe(qp);
953 if (!qp->attrs.irq_size)
954 return siw_activate_tx_from_sq(qp);
956 irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
959 return siw_activate_tx_from_sq(qp);
965 if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
966 qp->irq_burst = 0;
967 return siw_activate_tx_from_sq(qp);
993 qp->irq_get++;
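The comment fragment at line 945 and the test at line 965 describe the fairness rule in siw_activate_tx(): entries on the inbound read queue (IRQ) are normally served first, but after SIW_IRQ_MAXBURST_SQ_ACTIVE consecutive IRQ entries while the send queue also has work, the SQ gets a turn so it is not starved by a constant stream of inbound READ requests. A compact illustration of that arbitration rule (generic names and an illustrative burst limit):

        #include <stdbool.h>

        enum { MAXBURST = 4 };  /* stand-in for SIW_IRQ_MAXBURST_SQ_ACTIVE */

        /*
         * Decide whether the inbound-read queue gets the next TX slot.
         * Returns false when the send queue should be served instead.
         */
        bool serve_irq_next(bool irq_has_work, bool sq_has_work,
                            unsigned int *irq_burst)
        {
                if (!irq_has_work)
                        return false;           /* nothing queued: serve the SQ */

                if (sq_has_work && ++(*irq_burst) >= MAXBURST) {
                        *irq_burst = 0;         /* reset the burst counter */
                        return false;           /* give the SQ a turn */
                }
                return true;                    /* keep serving the IRQ */
        }
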
1031 int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
1034 struct siw_cq *cq = qp->scq;
1058 cqe->base_qp = &qp->base_qp;
1060 cqe->qp_id = qp_id(qp);
1089 int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
1092 struct siw_cq *cq = qp->rcq;
1116 cqe->base_qp = &qp->base_qp;
1122 cqe->qp_id = qp_id(qp);
1159 void siw_sq_flush(struct siw_qp *qp)
1162 struct siw_wqe *wqe = tx_wqe(qp);
1168 while (qp->attrs.orq_size) {
1169 sqe = &qp->orq[qp->orq_get % qp->attrs.orq_size];
1173 if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
1177 qp->orq_get++;
1183 siw_dbg_qp(qp, "flush current SQE, type %d, status %d\n",
1196 siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
1204 while (qp->attrs.sq_size) {
1205 sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
1210 if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
1218 qp->sq_get++;
1221 siw_qp_event(qp, IB_EVENT_SQ_DRAINED);
1235 void siw_rq_flush(struct siw_qp *qp)
1237 struct siw_wqe *wqe = &qp->rx_untagged.wqe_active;
1243 siw_dbg_qp(qp, "flush current rqe, type %d, status %d\n",
1249 siw_rqe_complete(qp, &wqe->rqe, wqe->bytes,
1254 siw_sqe_complete(qp, &wqe->sqe, 0, SIW_WC_WR_FLUSH_ERR);
1258 wqe = &qp->rx_tagged.wqe_active;
1267 while (qp->attrs.rq_size) {
1269 &qp->recvq[qp->rq_get % qp->attrs.rq_size];
1274 if (siw_rqe_complete(qp, rqe, 0, 0, SIW_WC_WR_FLUSH_ERR) != 0)
1278 qp->rq_get++;
1282 int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
1284 int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
1288 kref_init(&qp->ref);
1289 qp->sdev = sdev;
1290 siw_dbg_qp(qp, "new QP\n");
1297 struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
1298 struct siw_device *sdev = qp->sdev;
1301 if (qp->cep)
1302 siw_cep_put(qp->cep);
1304 found = xa_erase(&sdev->qp_xa, qp_id(qp));
1305 WARN_ON(found != qp);
1307 list_del(&qp->devq);
1310 vfree(qp->sendq);
1311 vfree(qp->recvq);
1312 vfree(qp->irq);
1313 vfree(qp->orq);
1315 siw_put_tx_cpu(qp->tx_cpu);
1316 complete(&qp->qp_free);
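Lines 1282–1316 show QP lifetime management: siw_qp_add() publishes the QP in the device xarray and initializes its kref, while the release function whose body begins at line 1297 erases the xarray entry, unlinks the QP from the device list, frees the work queues and completes qp_free so a waiter can safely release the memory. A simplified sketch of that kref-plus-completion teardown pattern (generic object and helper names, not the driver's):

        #include <linux/kernel.h>
        #include <linux/kref.h>
        #include <linux/completion.h>
        #include <linux/slab.h>

        /* Illustrative object with the same lifetime pattern as struct siw_qp. */
        struct obj {
                struct kref ref;
                struct completion freed;
        };

        /* kref release callback, invoked when the last reference is dropped. */
        static void obj_release(struct kref *ref)
        {
                struct obj *o = container_of(ref, struct obj, ref);

                /* resources owned by the object would be released here */
                complete(&o->freed);
        }

        static void obj_put(struct obj *o)
        {
                kref_put(&o->ref, obj_release);
        }

        /*
         * Destroy path: drop the creator's reference, wait until the last
         * user has dropped theirs, then free the memory.
         */
        static void obj_destroy(struct obj *o)
        {
                obj_put(o);
                wait_for_completion(&o->freed);
                kfree(o);
        }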