Lines matching references to c_tx in drivers/infiniband/sw/siw/siw_qp_tx.c (SoftiWARP transmit path); the number opening each entry is the source line number.
49 static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
51 struct siw_wqe *wqe = &c_tx->wqe_active;
69 } else if (c_tx->in_syscall) {
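The references above are siw_try_1seg(), which tries to place a small single-SGE payload directly behind the freshly built header so header, payload, and CRC can leave in one send. A minimal userspace sketch of the idea, assuming an inline threshold and a single-segment check (MAX_HDR_INLINE and the names are illustrative, not the kernel's exact logic):

#include <stdint.h>
#include <string.h>

#define MAX_HDR_INLINE 64           /* assumed inline threshold */

/* Copy a one-segment payload right after the header; return the
 * number of bytes inlined, or -1 if the payload does not qualify. */
static int try_1seg(uint8_t *after_hdr, const uint8_t *payload,
                    size_t len, unsigned num_sge)
{
        if (num_sge != 1 || len > MAX_HDR_INLINE)
                return -1;
        memcpy(after_hdr, payload, len);
        return (int)len;
}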
117 static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
119 struct siw_wqe *wqe = &c_tx->wqe_active;
126 memcpy(&c_tx->pkt.ctrl,
130 c_tx->pkt.rreq.rsvd = 0;
131 c_tx->pkt.rreq.ddp_qn = htonl(RDMAP_UNTAGGED_QN_RDMA_READ);
132 c_tx->pkt.rreq.ddp_msn =
133 htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]);
134 c_tx->pkt.rreq.ddp_mo = 0;
135 c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
136 c_tx->pkt.rreq.sink_to =
138 c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
139 c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
140 c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);
142 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rreq);
143 crc = (char *)&c_tx->pkt.rreq_pkt.crc;
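Lines 130-143 fill an RDMA Read Request header: an untagged DDP message with its own queue number and a message sequence number that is bumped per queue before going on the wire. A hedged sketch of that MSN bookkeeping (htonl() stands in for the kernel byte-order helpers; the context struct is illustrative):

#include <stdint.h>
#include <arpa/inet.h>

enum { QN_SEND, QN_RDMA_READ, QN_TERMINATE, QN_COUNT };

struct msn_ctx { uint32_t ddp_msn[QN_COUNT]; };

/* Pre-increment, as "++c_tx->ddp_msn[...]" above suggests, so the
 * first message of a queue carries MSN 1 in network byte order. */
static uint32_t next_msn_wire(struct msn_ctx *ctx, unsigned qn)
{
        return htonl(++ctx->ddp_msn[qn]);
}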
148 memcpy(&c_tx->pkt.ctrl,
152 memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_SEND].ctrl,
155 c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
156 c_tx->pkt.send.ddp_msn =
157 htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
158 c_tx->pkt.send.ddp_mo = 0;
160 c_tx->pkt.send_inv.inval_stag = 0;
162 c_tx->ctrl_len = sizeof(struct iwarp_send);
164 crc = (char *)&c_tx->pkt.send_pkt.crc;
165 data = siw_try_1seg(c_tx, crc);
170 memcpy(&c_tx->pkt.ctrl,
174 memcpy(&c_tx->pkt.ctrl,
178 c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
179 c_tx->pkt.send.ddp_msn =
180 htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
181 c_tx->pkt.send.ddp_mo = 0;
183 c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);
185 c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
187 crc = (char *)&c_tx->pkt.send_pkt.crc;
188 data = siw_try_1seg(c_tx, crc);
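The two blocks above (lines 148-165 and 170-188) build the plain SEND and SEND-with-Invalidate headers; as the listing shows, they differ only in whether the invalidate-STag field carries the peer's rkey or zero. A small sketch of that choice (htonl() replaces cpu_to_be32() outside the kernel):

#include <stdint.h>
#include <arpa/inet.h>

/* 0 in the header means "nothing to invalidate" in this sketch. */
static uint32_t inval_stag_wire(int send_with_inv, uint32_t rkey)
{
        return send_with_inv ? htonl(rkey) : 0;
}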
192 memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_RDMA_WRITE].ctrl,
195 c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);
196 c_tx->pkt.rwrite.sink_to = cpu_to_be64(wqe->sqe.raddr);
197 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
199 crc = (char *)&c_tx->pkt.write_pkt.crc;
200 data = siw_try_1seg(c_tx, crc);
204 memcpy(&c_tx->pkt.ctrl,
209 c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey);
210 c_tx->pkt.rresp.sink_to = cpu_to_be64(wqe->sqe.raddr);
212 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
214 crc = (char *)&c_tx->pkt.write_pkt.crc;
215 data = siw_try_1seg(c_tx, crc);
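RDMA Write (lines 192-200) and Read Response (lines 204-215) are tagged DDP messages: instead of queue/MSN/MO fields, the header names the remote buffer by STag and 64-bit tagged offset. A compact sketch, assuming glibc's htobe64() in place of cpu_to_be64() and a packed layout for illustration:

#include <stdint.h>
#include <arpa/inet.h>
#include <endian.h>

struct tagged_hdr {
        uint32_t sink_stag;         /* remote memory key */
        uint64_t sink_to;           /* remote tagged offset */
} __attribute__((packed));

static void fill_tagged(struct tagged_hdr *h, uint32_t rkey,
                        uint64_t raddr)
{
        h->sink_stag = htonl(rkey);
        h->sink_to = htobe64(raddr);
}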
219 siw_dbg_qp(tx_qp(c_tx), "stale wqe type %d\n", tx_type(wqe));
225 c_tx->ctrl_sent = 0;
231 c_tx->pkt.ctrl.mpa_len =
232 htons(c_tx->ctrl_len + data - MPA_HDR_SIZE);
238 c_tx->ctrl_len += data;
240 if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
241 c_tx->pkt.c_untagged.ddp_mo = 0;
243 c_tx->pkt.c_tagged.ddp_to =
251 if (c_tx->mpa_crc_enabled)
252 siw_crc_oneshot(&c_tx->pkt, c_tx->ctrl_len, (u8 *)crc);
253 c_tx->ctrl_len += MPA_CRC_SIZE;
257 c_tx->ctrl_len += MPA_CRC_SIZE;
258 c_tx->sge_idx = 0;
259 c_tx->sge_off = 0;
260 c_tx->pbl_idx = 0;
271 if (c_tx->zcopy_tx && wqe->bytes >= SENDPAGE_THRESH &&
273 c_tx->use_sendpage = 1;
275 c_tx->use_sendpage = 0;
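Lines 225-275 finish header preparation: the 16-bit MPA length counts the DDP/RDMAP header plus any inlined payload but not the 2-byte MPA header itself, a CRC slot is reserved afterwards, and large payloads may be steered to the zero-copy sendpage path. The length math as a runnable sketch (constants per the MPA framing visible above):

#include <stdint.h>
#include <arpa/inet.h>

#define MPA_HDR_SIZE 2
#define MPA_CRC_SIZE 4

/* MPA length field for a short FPDU carrying "data" inlined payload
 * bytes; the CRC is sent but not counted in the length field. */
static uint16_t mpa_len_wire(unsigned ctrl_len, unsigned data)
{
        return htons((uint16_t)(ctrl_len + data - MPA_HDR_SIZE));
}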
294 static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
298 (char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent,
299 .iov_len = c_tx->ctrl_len - c_tx->ctrl_sent };
304 c_tx->ctrl_sent += rv;
306 if (c_tx->ctrl_sent == c_tx->ctrl_len)
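siw_tx_ctrl() (lines 294-306) pushes header bytes and keeps a ctrl_sent cursor so a short write resumes exactly where it stopped. The same resume pattern over a plain socket, as a hedged sketch with POSIX send() standing in for kernel_sendmsg():

#include <sys/socket.h>
#include <stddef.h>

/* Returns 0 once the header is fully out, 1 if bytes remain,
 * -1 on error; *sent is the resume cursor across calls. */
static int tx_ctrl(int fd, const char *hdr, size_t len, size_t *sent)
{
        ssize_t rv = send(fd, hdr + *sent, len - *sent,
                          MSG_DONTWAIT | MSG_MORE);
        if (rv < 0)
                return -1;
        *sent += (size_t)rv;
        return *sent == len ? 0 : 1;
}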
438 static noinline_for_stack int siw_tx_hdt(struct siw_iwarp_tx *c_tx,
441 struct siw_wqe *wqe = &c_tx->wqe_active;
442 struct siw_sge *sge = &wqe->sqe.sge[c_tx->sge_idx];
446 int seg = 0, do_crc = c_tx->do_crc, is_kva = 0, rv;
447 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
448 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
449 pbl_idx = c_tx->pbl_idx;
452 if (c_tx->state == SIW_SEND_HDR) {
453 if (c_tx->use_sendpage) {
454 rv = siw_tx_ctrl(c_tx, s, MSG_DONTWAIT | MSG_MORE);
458 c_tx->state = SIW_SEND_DATA;
461 (char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent;
463 c_tx->ctrl_len - c_tx->ctrl_sent;
481 if (is_kva && !c_tx->use_sendpage) {
491 siw_crc_update(&c_tx->mpa_crc,
509 wqe->processed -= c_tx->bytes_unsent;
515 if (!c_tx->use_sendpage) {
525 &c_tx->mpa_crc,
530 siw_crc_update(&c_tx->mpa_crc,
543 siw_crc_update(&c_tx->mpa_crc,
554 siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
556 wqe->processed -= c_tx->bytes_unsent;
571 if (likely(c_tx->state != SIW_SEND_TRAILER)) {
572 iov[seg].iov_base = &c_tx->trailer.pad[4 - c_tx->pad];
573 iov[seg].iov_len = trl_len = MAX_TRAILER - (4 - c_tx->pad);
575 iov[seg].iov_base = &c_tx->trailer.pad[c_tx->ctrl_sent];
576 iov[seg].iov_len = trl_len = MAX_TRAILER - c_tx->ctrl_sent;
579 if (c_tx->pad) {
580 *(u32 *)c_tx->trailer.pad = 0;
582 siw_crc_update(&c_tx->mpa_crc,
583 (u8 *)&c_tx->trailer.crc - c_tx->pad,
584 c_tx->pad);
586 if (!c_tx->mpa_crc_enabled)
587 c_tx->trailer.crc = 0;
589 siw_crc_final(&c_tx->mpa_crc, (u8 *)&c_tx->trailer.crc);
591 data_len = c_tx->bytes_unsent;
593 if (c_tx->use_sendpage) {
594 rv = siw_0copy_tx(s, page_array, &wqe->sqe.sge[c_tx->sge_idx],
595 c_tx->sge_off, data_len);
614 c_tx->ctrl_sent += rv;
625 c_tx->sge_idx = sge_idx;
626 c_tx->sge_off = sge_off;
627 c_tx->pbl_idx = pbl_idx;
634 c_tx->state = SIW_SEND_TRAILER;
635 c_tx->ctrl_len = MAX_TRAILER;
636 c_tx->ctrl_sent = rv + 4 - c_tx->pad;
637 c_tx->bytes_unsent = 0;
643 c_tx->state = SIW_SEND_DATA;
653 c_tx->bytes_unsent -= rv;
654 sge = &wqe->sqe.sge[c_tx->sge_idx];
655 sge_unsent = sge->length - c_tx->sge_off;
659 c_tx->sge_idx++;
660 c_tx->sge_off = 0;
664 c_tx->sge_off += rv;
669 c_tx->do_crc = 0;
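siw_tx_hdt() (lines 438-669) gathers header, data, and trailer into one vectored send. The trailer lines show the trick: pad[] and crc are contiguous, and the trailer iovec starts 4 - pad bytes into the pad array so exactly pad + 4 bytes go out. A standalone sketch of that arithmetic (MAX_TRAILER follows from the expressions above):

#include <stdint.h>
#include <stddef.h>

#define MAX_TRAILER 8               /* 4 pad bytes + 4 CRC bytes */

struct trailer {
        uint8_t pad[4];
        uint32_t crc;
};

/* Point the iovec 4 - pad bytes into pad[], so its length is
 * MAX_TRAILER - (4 - pad) == pad + sizeof(crc). */
static void trailer_iov(struct trailer *t, unsigned pad,
                        void **base, size_t *len)
{
        *base = &t->pad[4 - pad];
        *len = MAX_TRAILER - (4 - pad);
}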
674 static void siw_update_tcpseg(struct siw_iwarp_tx *c_tx,
680 if (c_tx->gso_seg_limit == 0)
681 c_tx->tcp_seglen = tp->mss_cache * tp->gso_segs;
683 c_tx->tcp_seglen =
685 min_t(u16, c_tx->gso_seg_limit, tp->gso_segs);
687 c_tx->tcp_seglen = tp->mss_cache;
690 c_tx->tcp_seglen &= 0xfffffff8;
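siw_update_tcpseg() (lines 674-690) sizes the next FPDU from live TCP state: MSS times the current GSO segment count, optionally capped by gso_seg_limit, then rounded down to an 8-byte boundary (loopback can report odd values). A hedged standalone version of the same computation:

#include <stdint.h>

static uint32_t tcp_seglen(uint32_t mss, uint16_t gso_segs,
                           uint16_t gso_seg_limit)
{
        uint32_t segs = 1;          /* no GSO: a single MSS */

        if (gso_segs)
                segs = (gso_seg_limit && gso_seg_limit < gso_segs)
                        ? gso_seg_limit : gso_segs;
        return (mss * segs) & 0xfffffff8u;
}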
709 struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
712 c_tx->ctrl_len =
713 iwarp_pktinfo[__rdmap_get_opcode(&c_tx->pkt.ctrl)].hdr_len;
714 c_tx->ctrl_sent = 0;
719 if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
721 c_tx->pkt.c_untagged.ddp_mo = cpu_to_be32(wqe->processed);
723 c_tx->pkt.c_tagged.ddp_to =
727 if (data_len + c_tx->ctrl_len + MPA_CRC_SIZE > c_tx->tcp_seglen) {
729 data_len = c_tx->tcp_seglen - (c_tx->ctrl_len + MPA_CRC_SIZE);
730 c_tx->pkt.ctrl.ddp_rdmap_ctrl &= ~DDP_FLAG_LAST;
731 c_tx->pad = 0;
733 c_tx->pkt.ctrl.ddp_rdmap_ctrl |= DDP_FLAG_LAST;
734 c_tx->pad = -data_len & 0x3;
736 c_tx->bytes_unsent = data_len;
738 c_tx->pkt.ctrl.mpa_len =
739 htons(c_tx->ctrl_len + data_len - MPA_HDR_SIZE);
744 if (c_tx->mpa_crc_enabled) {
745 siw_crc_init(&c_tx->mpa_crc);
746 siw_crc_update(&c_tx->mpa_crc, &c_tx->pkt, c_tx->ctrl_len);
747 c_tx->do_crc = 1;
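siw_prepare_fpdu() (lines 709-747) decides whether the remaining payload fits into one TCP segment: if not, the payload is trimmed to the budget and DDP_FLAG_LAST is cleared; if it fits, the FPDU is marked last and padded to a 4-byte boundary. The decision as a sketch (struct and names are illustrative):

#include <stdint.h>

#define MPA_CRC_SIZE 4

struct fpdu_plan {
        unsigned data_len;
        unsigned pad;
        int ddp_last;
};

static void plan_fpdu(struct fpdu_plan *p, unsigned unsent,
                      unsigned ctrl_len, unsigned tcp_seglen)
{
        if (unsent + ctrl_len + MPA_CRC_SIZE > tcp_seglen) {
                p->data_len = tcp_seglen - (ctrl_len + MPA_CRC_SIZE);
                p->ddp_last = 0;
                p->pad = 0;         /* only the closing FPDU pads */
        } else {
                p->data_len = unsent;
                p->ddp_last = 1;
                p->pad = -unsent & 0x3;
        }
}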
796 struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
845 siw_update_tcpseg(c_tx, s);
847 rv = siw_qp_prepare_tx(c_tx);
849 c_tx->state = SIW_SEND_HDR;
852 c_tx->state = SIW_SEND_SHORT_FPDU;
867 if (c_tx->state == SIW_SEND_SHORT_FPDU) {
881 rv = siw_tx_ctrl(c_tx, s, msg_flags);
890 rv = siw_tx_hdt(c_tx, s);
897 if (unlikely(c_tx->tx_suspend)) {
906 if (c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_LAST) {
910 c_tx->state = SIW_SEND_HDR;
912 siw_update_tcpseg(c_tx, s);
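The final group (lines 796-912) is the main send loop: after preparing a header, the context enters SIW_SEND_HDR or, for a fully inlined FPDU, SIW_SEND_SHORT_FPDU; short FPDUs go out via siw_tx_ctrl(), everything else via siw_tx_hdt(), and a completed FPDU carrying DDP_FLAG_LAST resets the state and refreshes the segment size. A hedged pseudostructure of that progression, not the kernel loop:

enum tx_state { SEND_HDR, SEND_DATA, SEND_TRAILER, SEND_SHORT_FPDU };

struct tx { enum tx_state state; int ddp_last; };

/* One round of the loop: dispatch by state, then reset once a DDP
 * segment is complete so the next FPDU starts with a fresh header. */
static void tx_round(struct tx *t,
                     int (*send_ctrl)(struct tx *),
                     int (*send_hdt)(struct tx *))
{
        int rv = (t->state == SEND_SHORT_FPDU) ? send_ctrl(t)
                                               : send_hdt(t);
        if (rv == 0 && t->ddp_last)
                t->state = SEND_HDR;    /* refresh seglen, next FPDU */
}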