1d6b92ffaSHans Petter Selasky /*
2d6b92ffaSHans Petter Selasky * Copyright (c) 2006-2016 Chelsio, Inc. All rights reserved.
3d6b92ffaSHans Petter Selasky *
4d6b92ffaSHans Petter Selasky * This software is available to you under a choice of one of two
5d6b92ffaSHans Petter Selasky * licenses. You may choose to be licensed under the terms of the GNU
6d6b92ffaSHans Petter Selasky * General Public License (GPL) Version 2, available from the file
7d6b92ffaSHans Petter Selasky * COPYING in the main directory of this source tree, or the
8d6b92ffaSHans Petter Selasky * OpenIB.org BSD license below:
9d6b92ffaSHans Petter Selasky *
10d6b92ffaSHans Petter Selasky * Redistribution and use in source and binary forms, with or
11d6b92ffaSHans Petter Selasky * without modification, are permitted provided that the following
12d6b92ffaSHans Petter Selasky * conditions are met:
13d6b92ffaSHans Petter Selasky *
14d6b92ffaSHans Petter Selasky * - Redistributions of source code must retain the above
15d6b92ffaSHans Petter Selasky * copyright notice, this list of conditions and the following
16d6b92ffaSHans Petter Selasky * disclaimer.
17d6b92ffaSHans Petter Selasky *
18d6b92ffaSHans Petter Selasky * - Redistributions in binary form must reproduce the above
19d6b92ffaSHans Petter Selasky * copyright notice, this list of conditions and the following
20d6b92ffaSHans Petter Selasky * disclaimer in the documentation and/or other materials
21d6b92ffaSHans Petter Selasky * provided with the distribution.
22d6b92ffaSHans Petter Selasky *
23d6b92ffaSHans Petter Selasky * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24d6b92ffaSHans Petter Selasky * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25d6b92ffaSHans Petter Selasky * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26d6b92ffaSHans Petter Selasky * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27d6b92ffaSHans Petter Selasky * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28d6b92ffaSHans Petter Selasky * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29d6b92ffaSHans Petter Selasky * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30d6b92ffaSHans Petter Selasky * SOFTWARE.
31d6b92ffaSHans Petter Selasky */
32d6b92ffaSHans Petter Selasky #include <config.h>
33d6b92ffaSHans Petter Selasky
34d6b92ffaSHans Petter Selasky #include <assert.h>
35d6b92ffaSHans Petter Selasky #include <stdlib.h>
36d6b92ffaSHans Petter Selasky #include <pthread.h>
37d6b92ffaSHans Petter Selasky #include <string.h>
38d6b92ffaSHans Petter Selasky #include <stdio.h>
39d6b92ffaSHans Petter Selasky #include "libcxgb4.h"
40d6b92ffaSHans Petter Selasky
41d6b92ffaSHans Petter Selasky #ifdef STATS
42d6b92ffaSHans Petter Selasky struct c4iw_stats c4iw_stats;
43d6b92ffaSHans Petter Selasky #endif
44d6b92ffaSHans Petter Selasky
/*
 * Copy a locally-built SQ work request (wqe, len16 16-byte units long) into
 * the hardware send queue at the current producer index, handling the
 * circular-queue wrap.  For on-chip queues the destination is write-combining
 * memory, so the copy is bracketed with WC ordering barriers.
 */
static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16)
{
	void *src, *dst;
	uintptr_t end;
	int total, len;

	src = &wqe->flits[0];
	/* Destination flit for the current producer slot. */
	dst = &wq->sq.queue->flits[wq->sq.wq_pidx *
	    (T4_EQ_ENTRY_SIZE / sizeof(__be64))];
	if (t4_sq_onchip(wq)) {
		/* On-chip WQEs are padded to a multiple of 4 len16 units. */
		len16 = align(len16, 4);

		/* In onchip mode the copy below will be made to WC memory and
		 * could trigger DMA. In offchip mode the copy below only
		 * queues the WQE, DMA cannot start until t4_ring_sq_db
		 * happens */
		mmio_wc_start();
	}

	/* NOTE len16 cannot be large enough to write to the
	   same sq.queue memory twice in this loop */
	total = len16 * 16;
	end = (uintptr_t)&wq->sq.queue[wq->sq.size];
	if (__predict_true((uintptr_t)dst + total <= end)) {
		/* Won't wrap around. */
		memcpy(dst, src, total);
	} else {
		/* Wraps: copy the tail to the end of the ring, the
		 * remainder to the start. */
		len = end - (uintptr_t)dst;
		memcpy(dst, src, len);
		memcpy(wq->sq.queue, src + len, total - len);
	}

	if (t4_sq_onchip(wq))
		/* Flush the WC buffers so the device sees the full WQE. */
		mmio_flush_writes();
}
80d6b92ffaSHans Petter Selasky
/*
 * Copy a locally-built RQ work request into the hardware receive queue at
 * the current producer index, handling the circular-queue wrap.  Unlike the
 * SQ path there is no on-chip variant, so no WC barriers are needed here.
 */
static void copy_wr_to_rq(struct t4_wq *wq, union t4_recv_wr *wqe, u8 len16)
{
	void *src, *dst;
	uintptr_t end;
	int total, len;

	src = &wqe->flits[0];
	/* Destination flit for the current producer slot. */
	dst = &wq->rq.queue->flits[wq->rq.wq_pidx *
	    (T4_EQ_ENTRY_SIZE / sizeof(__be64))];

	total = len16 * 16;
	end = (uintptr_t)&wq->rq.queue[wq->rq.size];
	if (__predict_true((uintptr_t)dst + total <= end)) {
		/* Won't wrap around. */
		memcpy(dst, src, total);
	} else {
		/* Wraps: split the copy at the end of the ring. */
		len = end - (uintptr_t)dst;
		memcpy(dst, src, len);
		memcpy(wq->rq.queue, src + len, total - len);
	}
}
102d6b92ffaSHans Petter Selasky
/*
 * Copy the caller's scatter/gather data inline into an FW_RI_DATA_IMMD
 * sub-descriptor.  At most 'max' bytes of payload are permitted; the
 * immediate area is zero-padded to a 16-byte boundary.
 *
 * Returns 0 on success and stores the payload length in *plenp, or
 * -EMSGSIZE when the total payload would exceed 'max'.
 */
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ibv_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		/* Check each SGE length against 'max' on its own first:
		 * otherwise a bogus length close to UINT32_MAX could wrap
		 * the u32 sum (plen + length), slip past the running-total
		 * check, and overflow the immediate area in the memcpy
		 * below. */
		if (wr->sg_list[i].length > (u32)max ||
		    (plen + wr->sg_list[i].length) > (u32)max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		len = wr->sg_list[i].length;
		memcpy(dstp, srcp, len);
		dstp += len;
		srcp += len;
	}
	/* Zero-pad the payload (plus the 8-byte immd header) out to a
	 * 16-byte boundary. */
	len = ROUND_UP(plen + 8, 16) - (plen + 8);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = htobe32(plen);
	*plenp = plen;
	return 0;
}
132d6b92ffaSHans Petter Selasky
/*
 * Build an FW_RI_DATA_ISGL sub-descriptor from the caller's SGE list.
 * Each SGE becomes two big-endian flits: (lkey << 32 | length) and addr.
 * Optionally reports the total byte count via *plenp.
 *
 * Returns 0 on success, -EMSGSIZE if the u32 byte count would wrap.
 */
static int build_isgl(struct fw_ri_isgl *isglp, struct ibv_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	__be64 *out = (__be64 *)isglp->sge;
	u32 total = 0;
	int i;

	for (i = 0; i < num_sge; i++) {
		struct ibv_sge *sge = &sg_list[i];

		/* Reject u32 wrap-around of the running byte count. */
		if (total + sge->length < total)
			return -EMSGSIZE;
		total += sge->length;
		*out++ = htobe64(((u64)sge->lkey << 32) | sge->length);
		*out++ = htobe64(sge->addr);
	}
	/* Terminating zero flit after the last SGE pair. */
	*out = 0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = htobe16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = total;
	return 0;
}
157d6b92ffaSHans Petter Selasky
/*
 * Fill in a FW_RI_SEND_WR work request for an IBV_WR_SEND.  The payload is
 * either copied inline (IBV_SEND_INLINE) or described by an ISGL; a
 * zero-SGE send gets an empty immediate descriptor.  On success *len16 is
 * set to the WQE size in 16-byte units.
 */
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ibv_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	/* SEND vs SEND_WITH_SE (solicited event) opcode selection. */
	if (wr->send_flags & IBV_SEND_SOLICITED)
		wqe->send.sendop_pkd = htobe32(
			FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
	else
		wqe->send.sendop_pkd = htobe32(
			FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
	wqe->send.stag_inv = 0;
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IBV_SEND_INLINE) {
			/* Copy payload into the WQE itself. */
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			/* Reference payload via a scatter/gather list. */
			ret = build_isgl(wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof (struct fw_ri_sge);
		}
	} else {
		/* Zero-length send: empty immediate descriptor. */
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = htobe32(plen);
	return 0;
}
206d6b92ffaSHans Petter Selasky
/*
 * Fill in a FW_RI_RDMA_WRITE_WR work request.  Mirrors build_rdma_send():
 * payload is inline immediate data or an ISGL, and the sink is described by
 * the caller's rkey/remote_addr.  On success *len16 is the WQE size in
 * 16-byte units.
 */
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ibv_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	/* Remote (sink) buffer: rkey + address from the WR. */
	wqe->write.stag_sink = htobe32(wr->wr.rdma.rkey);
	wqe->write.to_sink = htobe64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IBV_SEND_INLINE) {
			/* Copy payload into the WQE itself. */
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			/* Reference payload via a scatter/gather list. */
			ret = build_isgl(wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof (struct fw_ri_sge);
		}
	} else {
		/* Zero-length write: empty immediate descriptor. */
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = htobe32(plen);
	return 0;
}
247d6b92ffaSHans Petter Selasky
/*
 * Fill in a FW_RI_RDMA_READ_WR work request.  At most one SGE is supported.
 * A zero-SGE read uses stag 2 for both source and sink with zero length and
 * addresses, matching the original behavior.
 */
static int build_rdma_read(union t4_wr *wqe, struct ibv_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;

	if (wr->num_sge == 0) {
		/* Zero-length read request. */
		wqe->read.stag_src = htobe32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = htobe32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	} else {
		struct ibv_sge *sge = &wr->sg_list[0];

		/* Source: remote buffer named by rkey/remote_addr. */
		wqe->read.stag_src = htobe32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = htobe32((u32)(wr->wr.rdma.remote_addr >> 32));
		wqe->read.to_src_lo = htobe32((u32)wr->wr.rdma.remote_addr);
		/* Sink: local buffer described by the single SGE. */
		wqe->read.stag_sink = htobe32(sge->lkey);
		wqe->read.plen = htobe32(sge->length);
		wqe->read.to_sink_hi = htobe32((u32)(sge->addr >> 32));
		wqe->read.to_sink_lo = htobe32((u32)(sge->addr));
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
	return 0;
}
274d6b92ffaSHans Petter Selasky
/*
 * Fill in a receive work request: the payload is always described by an
 * ISGL built from the caller's SGE list.  On success *len16 is the WQE
 * size in 16-byte units.
 */
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ibv_recv_wr *wr, u8 *len16)
{
	int rc;

	rc = build_isgl(&wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (rc == 0)
		*len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
				      wr->num_sge * sizeof(struct fw_ri_sge),
				      16);
	return rc;
}
287d6b92ffaSHans Petter Selasky
/*
 * Ask the kernel to ring the doorbell on our behalf (used when user-mode
 * doorbell writes are disabled).  The producer index is smuggled through a
 * modify_qp call: sq_psn carries an SQ index, rq_psn an RQ index, and the
 * mask tells the kernel driver which queue to ring.
 */
static void ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 idx)
{
	struct ibv_modify_qp cmd = {};
	struct ibv_qp_attr attr;
	int mask;
	int __attribute__((unused)) rc;

	/* FIXME: Why do we need this barrier if the kernel is going to
	   trigger the DMA? */
	udma_to_device_barrier();
	if (qid != qhp->wq.sq.qid) {
		attr.rq_psn = idx;
		mask = IBV_QP_RQ_PSN;
	} else {
		attr.sq_psn = idx;
		mask = IBV_QP_SQ_PSN;
	}
	rc = ibv_cmd_modify_qp(&qhp->ibv_qp, &attr, mask, &cmd, sizeof(cmd));
	assert(!rc);
}
308d6b92ffaSHans Petter Selasky
/*
 * Post a chain of send work requests to the SQ.
 *
 * Each WR is built into a stack-local WQE (lwqe), then copied into the
 * hardware queue and produced; the doorbell is rung once at the end for the
 * whole batch.  On any failure, *bad_wr points at the offending WR and the
 * WRs before it remain posted.  Returns 0 or a negative errno.
 */
int c4iw_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
	           struct ibv_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe, lwqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq)) {
		pthread_spin_unlock(&qhp->lock);
		*bad_wr = wr;
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		pthread_spin_unlock(&qhp->lock);
		*bad_wr = wr;
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			/* Queue filled mid-batch. */
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		/* Build the WQE on the stack first, then copy it into the
		 * ring in one go. */
		wqe = &lwqe;
		fw_flags = 0;
		if (wr->send_flags & IBV_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IBV_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		/* Software SQ entry tracking this WQE for completion. */
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IBV_WR_SEND:
			INC_STAT(send);
			if (wr->send_flags & IBV_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			swsqe->opcode = FW_RI_SEND;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IBV_WR_RDMA_WRITE:
			INC_STAT(write);
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IBV_WR_RDMA_READ:
			INC_STAT(read);
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			/* Reads never carry solicited/completion flags. */
			fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list ? wr->sg_list[0].length :
					  0;
			/* Track the oldest outstanding read for completion
			 * ordering. */
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IBV_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode);
		wr = wr->next;
		num_wrs--;
		copy_wr_to_sq(&qhp->wq, wqe, len16);
		t4_sq_produce(&qhp->wq, len16);
		/* Accumulate doorbell increment in EQ-entry units. */
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (t4_wq_db_enabled(&qhp->wq)) {
		/* Ring the user doorbell once for the whole batch; the last
		 * WQE/len16 may be written through the WC doorbell on T5+. */
		t4_ring_sq_db(&qhp->wq, idx, dev_is_t4(qhp->rhp),
			      len16, wqe);
	} else
		/* User doorbells disabled: have the kernel ring it. */
		ring_kernel_db(qhp, qhp->wq.sq.qid, idx);
	/* This write is only for debugging, the value does not matter for DMA
	 */
	qhp->wq.sq.queue[qhp->wq.sq.size].status.host_wq_pidx = \
			(qhp->wq.sq.wq_pidx);

	pthread_spin_unlock(&qhp->lock);
	return err;
}
416d6b92ffaSHans Petter Selasky
/*
 * Post a chain of receive work requests to the RQ.
 *
 * Each WR is built into a stack-local WQE, copied into the hardware queue,
 * and produced; the doorbell is rung once at the end for the whole batch.
 * On failure, *bad_wr points at the offending WR and earlier WRs remain
 * posted.  Returns 0 or a negative errno.
 */
int c4iw_post_receive(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
		      struct ibv_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe, lwqe;
	u32 num_wrs;
	u8 len16 = 0;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq)) {
		pthread_spin_unlock(&qhp->lock);
		*bad_wr = wr;
		return -EINVAL;
	}
	INC_STAT(recv);
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		pthread_spin_unlock(&qhp->lock);
		*bad_wr = wr;
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		/* Build the WQE on the stack, then copy it into the ring. */
		wqe = &lwqe;
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			/* Queue filled mid-batch. */
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		/* Record the cookie so poll_cq can report it back. */
		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		/* wrid is the sw_rq slot index, not the caller's wr_id. */
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		copy_wr_to_rq(&qhp->wq, wqe, len16);
		t4_rq_produce(&qhp->wq, len16);
		/* Accumulate doorbell increment in EQ-entry units. */
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		/* One doorbell ring for the whole batch. */
		t4_ring_rq_db(&qhp->wq, idx, dev_is_t4(qhp->rhp),
			      len16, wqe);
	else
		/* User doorbells disabled: have the kernel ring it. */
		ring_kernel_db(qhp, qhp->wq.rq.qid, idx);
	/* Debug-only mirror of the producer index; not used for DMA. */
	qhp->wq.rq.queue[qhp->wq.rq.size].status.host_wq_pidx = \
			(qhp->wq.rq.wq_pidx);
	pthread_spin_unlock(&qhp->lock);
	return err;
}
484d6b92ffaSHans Petter Selasky
update_qp_state(struct c4iw_qp * qhp)485d6b92ffaSHans Petter Selasky static void update_qp_state(struct c4iw_qp *qhp)
486d6b92ffaSHans Petter Selasky {
487d6b92ffaSHans Petter Selasky struct ibv_query_qp cmd;
488d6b92ffaSHans Petter Selasky struct ibv_qp_attr attr;
489d6b92ffaSHans Petter Selasky struct ibv_qp_init_attr iattr;
490d6b92ffaSHans Petter Selasky int ret;
491d6b92ffaSHans Petter Selasky
492d6b92ffaSHans Petter Selasky ret = ibv_cmd_query_qp(&qhp->ibv_qp, &attr, IBV_QP_STATE, &iattr,
493d6b92ffaSHans Petter Selasky &cmd, sizeof cmd);
494d6b92ffaSHans Petter Selasky assert(!ret);
495d6b92ffaSHans Petter Selasky if (!ret)
496d6b92ffaSHans Petter Selasky qhp->ibv_qp.state = attr.qp_state;
497d6b92ffaSHans Petter Selasky }
498d6b92ffaSHans Petter Selasky
499d6b92ffaSHans Petter Selasky /*
500d6b92ffaSHans Petter Selasky * Assumes qhp lock is held.
501d6b92ffaSHans Petter Selasky */
/*
 * Assumes qhp lock is held.
 *
 * Flush a QP that has entered the error state: drain the hardware CQs and
 * generate flush completions for all pending SQ/RQ work requests.
 *
 * NOTE(review): the qhp lock is dropped and re-acquired below so that the
 * cq-lock-before-qp-lock ordering can be respected; callers must tolerate
 * the lock being released temporarily.  The function returns with the qhp
 * lock held again, matching its entry condition.
 */
void c4iw_flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	int count;

	/* Already flushed — nothing to do (idempotent). */
	if (qhp->wq.flushed)
		return;

	/* Sync our cached QP state with the kernel before flushing. */
	update_qp_state(qhp);

	rchp = to_c4iw_cq(qhp->ibv_qp.recv_cq);
	schp = to_c4iw_cq(qhp->ibv_qp.send_cq);

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* Mark flushed before dropping the lock so concurrent flushers
	 * bail out at the check above. */
	qhp->wq.flushed = 1;
	pthread_spin_unlock(&qhp->lock);

	/* locking heirarchy: cq lock first, then qp lock. */
	pthread_spin_lock(&rchp->lock);
	pthread_spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	pthread_spin_unlock(&qhp->lock);
	pthread_spin_unlock(&rchp->lock);

	/* locking heirarchy: cq lock first, then qp lock. */
	pthread_spin_lock(&schp->lock);
	pthread_spin_lock(&qhp->lock);
	/* Avoid double-flushing when send and recv share one CQ. */
	if (schp != rchp)
		c4iw_flush_hw_cq(schp);
	c4iw_flush_sq(qhp);
	pthread_spin_unlock(&qhp->lock);
	pthread_spin_unlock(&schp->lock);
	/* Re-acquire the qp lock to restore the caller's entry condition. */
	pthread_spin_lock(&qhp->lock);
}
538d6b92ffaSHans Petter Selasky
c4iw_flush_qps(struct c4iw_dev * dev)539d6b92ffaSHans Petter Selasky void c4iw_flush_qps(struct c4iw_dev *dev)
540d6b92ffaSHans Petter Selasky {
541d6b92ffaSHans Petter Selasky int i;
542d6b92ffaSHans Petter Selasky
543d6b92ffaSHans Petter Selasky pthread_spin_lock(&dev->lock);
544d6b92ffaSHans Petter Selasky for (i=0; i < dev->max_qp; i++) {
545d6b92ffaSHans Petter Selasky struct c4iw_qp *qhp = dev->qpid2ptr[i];
546d6b92ffaSHans Petter Selasky if (qhp) {
547d6b92ffaSHans Petter Selasky if (!qhp->wq.flushed && t4_wq_in_error(&qhp->wq)) {
548d6b92ffaSHans Petter Selasky pthread_spin_lock(&qhp->lock);
549d6b92ffaSHans Petter Selasky c4iw_flush_qp(qhp);
550d6b92ffaSHans Petter Selasky pthread_spin_unlock(&qhp->lock);
551d6b92ffaSHans Petter Selasky }
552d6b92ffaSHans Petter Selasky }
553d6b92ffaSHans Petter Selasky }
554d6b92ffaSHans Petter Selasky pthread_spin_unlock(&dev->lock);
555d6b92ffaSHans Petter Selasky }
556