xref: /freebsd/sys/dev/mlx5/mlx5_fpga/mlx5fpga_conn.c (revision 95ee2897e98f5d444f26ed2334cc7c439f9c16c6)
1*e9dcd831SSlava Shwartsman /*-
2*e9dcd831SSlava Shwartsman  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3*e9dcd831SSlava Shwartsman  *
4*e9dcd831SSlava Shwartsman  * This software is available to you under a choice of one of two
5*e9dcd831SSlava Shwartsman  * licenses.  You may choose to be licensed under the terms of the GNU
6*e9dcd831SSlava Shwartsman  * General Public License (GPL) Version 2, available from the file
7*e9dcd831SSlava Shwartsman  * COPYING in the main directory of this source tree, or the
8*e9dcd831SSlava Shwartsman  * OpenIB.org BSD license below:
9*e9dcd831SSlava Shwartsman  *
10*e9dcd831SSlava Shwartsman  *     Redistribution and use in source and binary forms, with or
11*e9dcd831SSlava Shwartsman  *     without modification, are permitted provided that the following
12*e9dcd831SSlava Shwartsman  *     conditions are met:
13*e9dcd831SSlava Shwartsman  *
14*e9dcd831SSlava Shwartsman  *      - Redistributions of source code must retain the above
15*e9dcd831SSlava Shwartsman  *        copyright notice, this list of conditions and the following
16*e9dcd831SSlava Shwartsman  *        disclaimer.
17*e9dcd831SSlava Shwartsman  *
18*e9dcd831SSlava Shwartsman  *      - Redistributions in binary form must reproduce the above
19*e9dcd831SSlava Shwartsman  *        copyright notice, this list of conditions and the following
20*e9dcd831SSlava Shwartsman  *        disclaimer in the documentation and/or other materials
21*e9dcd831SSlava Shwartsman  *        provided with the distribution.
22*e9dcd831SSlava Shwartsman  *
23*e9dcd831SSlava Shwartsman  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24*e9dcd831SSlava Shwartsman  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25*e9dcd831SSlava Shwartsman  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26*e9dcd831SSlava Shwartsman  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27*e9dcd831SSlava Shwartsman  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28*e9dcd831SSlava Shwartsman  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29*e9dcd831SSlava Shwartsman  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30*e9dcd831SSlava Shwartsman  * SOFTWARE.
31*e9dcd831SSlava Shwartsman  */
32*e9dcd831SSlava Shwartsman 
33*e9dcd831SSlava Shwartsman #include <linux/etherdevice.h>
34*e9dcd831SSlava Shwartsman #include <dev/mlx5/vport.h>
35*e9dcd831SSlava Shwartsman #include <dev/mlx5/mlx5_core/mlx5_core.h>
36*e9dcd831SSlava Shwartsman #include <dev/mlx5/mlx5_lib/mlx5.h>
37*e9dcd831SSlava Shwartsman #include <dev/mlx5/mlx5_fpga/core.h>
38*e9dcd831SSlava Shwartsman #include <dev/mlx5/mlx5_fpga/conn.h>
39*e9dcd831SSlava Shwartsman 
40*e9dcd831SSlava Shwartsman #define MLX5_FPGA_PKEY 0xFFFF
41*e9dcd831SSlava Shwartsman #define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */
42*e9dcd831SSlava Shwartsman #define MLX5_FPGA_RECV_SIZE 2048
43*e9dcd831SSlava Shwartsman #define MLX5_FPGA_PORT_NUM 1
44*e9dcd831SSlava Shwartsman #define MLX5_FPGA_CQ_BUDGET 64
45*e9dcd831SSlava Shwartsman 
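/*
 * DMA-map the (at most two) scatter/gather entries of a buffer for this
 * connection.  sg[0] is mandatory; sg[1] is mapped only when present, and
 * sg[0] is unmapped again if mapping sg[1] fails.
 */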
46*e9dcd831SSlava Shwartsman static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
47*e9dcd831SSlava Shwartsman 				  struct mlx5_fpga_dma_buf *buf)
48*e9dcd831SSlava Shwartsman {
49*e9dcd831SSlava Shwartsman 	struct device *dma_device;
50*e9dcd831SSlava Shwartsman 	int err = 0;
51*e9dcd831SSlava Shwartsman 
52*e9dcd831SSlava Shwartsman 	if (unlikely(!buf->sg[0].data))
53*e9dcd831SSlava Shwartsman 		goto out;
54*e9dcd831SSlava Shwartsman 
55*e9dcd831SSlava Shwartsman 	dma_device = &conn->fdev->mdev->pdev->dev;
56*e9dcd831SSlava Shwartsman 	buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
57*e9dcd831SSlava Shwartsman 					     buf->sg[0].size, buf->dma_dir);
58*e9dcd831SSlava Shwartsman 	err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
59*e9dcd831SSlava Shwartsman 	if (unlikely(err)) {
60*e9dcd831SSlava Shwartsman 		mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err);
61*e9dcd831SSlava Shwartsman 		err = -ENOMEM;
62*e9dcd831SSlava Shwartsman 		goto out;
63*e9dcd831SSlava Shwartsman 	}
64*e9dcd831SSlava Shwartsman 
65*e9dcd831SSlava Shwartsman 	if (!buf->sg[1].data)
66*e9dcd831SSlava Shwartsman 		goto out;
67*e9dcd831SSlava Shwartsman 
68*e9dcd831SSlava Shwartsman 	buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data,
69*e9dcd831SSlava Shwartsman 					     buf->sg[1].size, buf->dma_dir);
70*e9dcd831SSlava Shwartsman 	err = dma_mapping_error(dma_device, buf->sg[1].dma_addr);
71*e9dcd831SSlava Shwartsman 	if (unlikely(err)) {
72*e9dcd831SSlava Shwartsman 		mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err);
73*e9dcd831SSlava Shwartsman 		dma_unmap_single(dma_device, buf->sg[0].dma_addr,
74*e9dcd831SSlava Shwartsman 				 buf->sg[0].size, buf->dma_dir);
75*e9dcd831SSlava Shwartsman 		err = -ENOMEM;
76*e9dcd831SSlava Shwartsman 	}
77*e9dcd831SSlava Shwartsman 
78*e9dcd831SSlava Shwartsman out:
79*e9dcd831SSlava Shwartsman 	return err;
80*e9dcd831SSlava Shwartsman }
81*e9dcd831SSlava Shwartsman 
82*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
83*e9dcd831SSlava Shwartsman 				     struct mlx5_fpga_dma_buf *buf)
84*e9dcd831SSlava Shwartsman {
85*e9dcd831SSlava Shwartsman 	struct device *dma_device;
86*e9dcd831SSlava Shwartsman 
87*e9dcd831SSlava Shwartsman 	dma_device = &conn->fdev->mdev->pdev->dev;
88*e9dcd831SSlava Shwartsman 	if (buf->sg[1].data)
89*e9dcd831SSlava Shwartsman 		dma_unmap_single(dma_device, buf->sg[1].dma_addr,
90*e9dcd831SSlava Shwartsman 				 buf->sg[1].size, buf->dma_dir);
91*e9dcd831SSlava Shwartsman 
92*e9dcd831SSlava Shwartsman 	if (likely(buf->sg[0].data))
93*e9dcd831SSlava Shwartsman 		dma_unmap_single(dma_device, buf->sg[0].dma_addr,
94*e9dcd831SSlava Shwartsman 				 buf->sg[0].size, buf->dma_dir);
95*e9dcd831SSlava Shwartsman }
96*e9dcd831SSlava Shwartsman 
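/*
 * Post one receive WQE for the buffer: map it, fill a data segment at the
 * next RQ producer slot, then bump the producer counter and the RQ
 * doorbell record after a dma_wmb().
 */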
97*e9dcd831SSlava Shwartsman static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
98*e9dcd831SSlava Shwartsman 				    struct mlx5_fpga_dma_buf *buf)
99*e9dcd831SSlava Shwartsman {
100*e9dcd831SSlava Shwartsman 	struct mlx5_wqe_data_seg *data;
101*e9dcd831SSlava Shwartsman 	unsigned int ix;
102*e9dcd831SSlava Shwartsman 	int err = 0;
103*e9dcd831SSlava Shwartsman 
104*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_map_buf(conn, buf);
105*e9dcd831SSlava Shwartsman 	if (unlikely(err))
106*e9dcd831SSlava Shwartsman 		goto out;
107*e9dcd831SSlava Shwartsman 
108*e9dcd831SSlava Shwartsman 	if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
109*e9dcd831SSlava Shwartsman 		mlx5_fpga_conn_unmap_buf(conn, buf);
110*e9dcd831SSlava Shwartsman 		return -EBUSY;
111*e9dcd831SSlava Shwartsman 	}
112*e9dcd831SSlava Shwartsman 
113*e9dcd831SSlava Shwartsman 	ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
114*e9dcd831SSlava Shwartsman 	data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
115*e9dcd831SSlava Shwartsman 	data->byte_count = cpu_to_be32(buf->sg[0].size);
116*e9dcd831SSlava Shwartsman 	data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
117*e9dcd831SSlava Shwartsman 	data->addr = cpu_to_be64(buf->sg[0].dma_addr);
118*e9dcd831SSlava Shwartsman 
119*e9dcd831SSlava Shwartsman 	conn->qp.rq.pc++;
120*e9dcd831SSlava Shwartsman 	conn->qp.rq.bufs[ix] = buf;
121*e9dcd831SSlava Shwartsman 
122*e9dcd831SSlava Shwartsman 	/* Make sure that descriptors are written before doorbell record. */
123*e9dcd831SSlava Shwartsman 	dma_wmb();
124*e9dcd831SSlava Shwartsman 	*conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
125*e9dcd831SSlava Shwartsman out:
126*e9dcd831SSlava Shwartsman 	return err;
127*e9dcd831SSlava Shwartsman }
128*e9dcd831SSlava Shwartsman 
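/*
 * Ring the send doorbell: publish the SQ producer counter in the doorbell
 * record, then write the first WQE control segment to the UAR (presumably
 * the BlueFlame/doorbell register, given MLX5_BF_OFFSET) so the device
 * starts fetching the new WQE.
 */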
129*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
130*e9dcd831SSlava Shwartsman {
131*e9dcd831SSlava Shwartsman 	/* ensure wqe is visible to device before updating doorbell record */
132*e9dcd831SSlava Shwartsman 	dma_wmb();
133*e9dcd831SSlava Shwartsman 	*conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
134*e9dcd831SSlava Shwartsman 	/* Make sure that doorbell record is visible before ringing */
135*e9dcd831SSlava Shwartsman 	wmb();
136*e9dcd831SSlava Shwartsman 	mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
137*e9dcd831SSlava Shwartsman }
138*e9dcd831SSlava Shwartsman 
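/*
 * Build a SEND WQE at the next SQ slot: one control segment followed by a
 * data segment per valid sg entry, then hand it to the device via
 * mlx5_fpga_conn_notify_hw().  Callers hold the SQ lock.
 */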
139*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
140*e9dcd831SSlava Shwartsman 				     struct mlx5_fpga_dma_buf *buf)
141*e9dcd831SSlava Shwartsman {
142*e9dcd831SSlava Shwartsman 	struct mlx5_wqe_ctrl_seg *ctrl;
143*e9dcd831SSlava Shwartsman 	struct mlx5_wqe_data_seg *data;
144*e9dcd831SSlava Shwartsman 	unsigned int ix, sgi;
145*e9dcd831SSlava Shwartsman 	int size = 1;
146*e9dcd831SSlava Shwartsman 
147*e9dcd831SSlava Shwartsman 	ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);
148*e9dcd831SSlava Shwartsman 
149*e9dcd831SSlava Shwartsman 	ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);
150*e9dcd831SSlava Shwartsman 	data = (void *)(ctrl + 1);
151*e9dcd831SSlava Shwartsman 
152*e9dcd831SSlava Shwartsman 	for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) {
153*e9dcd831SSlava Shwartsman 		if (!buf->sg[sgi].data)
154*e9dcd831SSlava Shwartsman 			break;
155*e9dcd831SSlava Shwartsman 		data->byte_count = cpu_to_be32(buf->sg[sgi].size);
156*e9dcd831SSlava Shwartsman 		data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
157*e9dcd831SSlava Shwartsman 		data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
158*e9dcd831SSlava Shwartsman 		data++;
159*e9dcd831SSlava Shwartsman 		size++;
160*e9dcd831SSlava Shwartsman 	}
161*e9dcd831SSlava Shwartsman 
162*e9dcd831SSlava Shwartsman 	ctrl->imm = 0;
163*e9dcd831SSlava Shwartsman 	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
164*e9dcd831SSlava Shwartsman 	ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
165*e9dcd831SSlava Shwartsman 					     MLX5_OPCODE_SEND);
166*e9dcd831SSlava Shwartsman 	ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));
167*e9dcd831SSlava Shwartsman 
168*e9dcd831SSlava Shwartsman 	conn->qp.sq.pc++;
169*e9dcd831SSlava Shwartsman 	conn->qp.sq.bufs[ix] = buf;
170*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_notify_hw(conn, ctrl);
171*e9dcd831SSlava Shwartsman }
172*e9dcd831SSlava Shwartsman 
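/*
 * Queue a buffer for transmission to the FPGA.  If the SQ is full the
 * buffer is kept on a backlog list and posted later from the SQ
 * completion handler, preserving submission order.
 */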
173*e9dcd831SSlava Shwartsman int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
174*e9dcd831SSlava Shwartsman 			struct mlx5_fpga_dma_buf *buf)
175*e9dcd831SSlava Shwartsman {
176*e9dcd831SSlava Shwartsman 	unsigned long flags;
177*e9dcd831SSlava Shwartsman 	int err;
178*e9dcd831SSlava Shwartsman 
179*e9dcd831SSlava Shwartsman 	if (!conn->qp.active)
180*e9dcd831SSlava Shwartsman 		return -ENOTCONN;
181*e9dcd831SSlava Shwartsman 
182*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_map_buf(conn, buf);
183*e9dcd831SSlava Shwartsman 	if (err)
184*e9dcd831SSlava Shwartsman 		return err;
185*e9dcd831SSlava Shwartsman 
186*e9dcd831SSlava Shwartsman 	spin_lock_irqsave(&conn->qp.sq.lock, flags);
187*e9dcd831SSlava Shwartsman 
188*e9dcd831SSlava Shwartsman 	if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {
189*e9dcd831SSlava Shwartsman 		list_add_tail(&buf->list, &conn->qp.sq.backlog);
190*e9dcd831SSlava Shwartsman 		goto out_unlock;
191*e9dcd831SSlava Shwartsman 	}
192*e9dcd831SSlava Shwartsman 
193*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_post_send(conn, buf);
194*e9dcd831SSlava Shwartsman 
195*e9dcd831SSlava Shwartsman out_unlock:
196*e9dcd831SSlava Shwartsman 	spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
197*e9dcd831SSlava Shwartsman 	return err;
198*e9dcd831SSlava Shwartsman }
199*e9dcd831SSlava Shwartsman 
200*e9dcd831SSlava Shwartsman static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
201*e9dcd831SSlava Shwartsman {
202*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_dma_buf *buf;
203*e9dcd831SSlava Shwartsman 	int err;
204*e9dcd831SSlava Shwartsman 
205*e9dcd831SSlava Shwartsman 	buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, 0);
206*e9dcd831SSlava Shwartsman 	if (!buf)
207*e9dcd831SSlava Shwartsman 		return -ENOMEM;
208*e9dcd831SSlava Shwartsman 
209*e9dcd831SSlava Shwartsman 	buf->sg[0].data = (void *)(buf + 1);
210*e9dcd831SSlava Shwartsman 	buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
211*e9dcd831SSlava Shwartsman 	buf->dma_dir = DMA_FROM_DEVICE;
212*e9dcd831SSlava Shwartsman 
213*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_post_recv(conn, buf);
214*e9dcd831SSlava Shwartsman 	if (err)
215*e9dcd831SSlava Shwartsman 		kfree(buf);
216*e9dcd831SSlava Shwartsman 
217*e9dcd831SSlava Shwartsman 	return err;
218*e9dcd831SSlava Shwartsman }
219*e9dcd831SSlava Shwartsman 
220*e9dcd831SSlava Shwartsman static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
221*e9dcd831SSlava Shwartsman 				      struct mlx5_core_mkey *mkey)
222*e9dcd831SSlava Shwartsman {
223*e9dcd831SSlava Shwartsman 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
224*e9dcd831SSlava Shwartsman 	void *mkc;
225*e9dcd831SSlava Shwartsman 	u32 *in;
226*e9dcd831SSlava Shwartsman 	int err;
227*e9dcd831SSlava Shwartsman 
228*e9dcd831SSlava Shwartsman 	in = kvzalloc(inlen, GFP_KERNEL);
229*e9dcd831SSlava Shwartsman 	if (!in)
230*e9dcd831SSlava Shwartsman 		return -ENOMEM;
231*e9dcd831SSlava Shwartsman 
232*e9dcd831SSlava Shwartsman 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
233*e9dcd831SSlava Shwartsman 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
234*e9dcd831SSlava Shwartsman 	MLX5_SET(mkc, mkc, lw, 1);
235*e9dcd831SSlava Shwartsman 	MLX5_SET(mkc, mkc, lr, 1);
236*e9dcd831SSlava Shwartsman 
237*e9dcd831SSlava Shwartsman 	MLX5_SET(mkc, mkc, pd, pdn);
238*e9dcd831SSlava Shwartsman 	MLX5_SET(mkc, mkc, length64, 1);
239*e9dcd831SSlava Shwartsman 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
240*e9dcd831SSlava Shwartsman 
241*e9dcd831SSlava Shwartsman 	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
242*e9dcd831SSlava Shwartsman 
243*e9dcd831SSlava Shwartsman 	kvfree(in);
244*e9dcd831SSlava Shwartsman 	return err;
245*e9dcd831SSlava Shwartsman }
246*e9dcd831SSlava Shwartsman 
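/*
 * Receive completion: pick up the buffer posted at this RQ index and, on
 * success, hand it to the connection's recv_cb and re-post it.  On any
 * completion error (or once the QP is no longer active) the QP is marked
 * inactive and the buffer is freed; flush errors are only logged at debug
 * level.
 */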
247*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
248*e9dcd831SSlava Shwartsman 				  struct mlx5_cqe64 *cqe, u8 status)
249*e9dcd831SSlava Shwartsman {
250*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_dma_buf *buf;
251*e9dcd831SSlava Shwartsman 	int ix, err;
252*e9dcd831SSlava Shwartsman 
253*e9dcd831SSlava Shwartsman 	ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
254*e9dcd831SSlava Shwartsman 	buf = conn->qp.rq.bufs[ix];
255*e9dcd831SSlava Shwartsman 	conn->qp.rq.bufs[ix] = NULL;
256*e9dcd831SSlava Shwartsman 	if (!status)
257*e9dcd831SSlava Shwartsman 		buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
258*e9dcd831SSlava Shwartsman 	conn->qp.rq.cc++;
259*e9dcd831SSlava Shwartsman 
260*e9dcd831SSlava Shwartsman 	if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
261*e9dcd831SSlava Shwartsman 		mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
262*e9dcd831SSlava Shwartsman 			       buf, conn->fpga_qpn, status);
263*e9dcd831SSlava Shwartsman 	else
264*e9dcd831SSlava Shwartsman 		mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
265*e9dcd831SSlava Shwartsman 			      buf, conn->fpga_qpn, status);
266*e9dcd831SSlava Shwartsman 
267*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_unmap_buf(conn, buf);
268*e9dcd831SSlava Shwartsman 
269*e9dcd831SSlava Shwartsman 	if (unlikely(status || !conn->qp.active)) {
270*e9dcd831SSlava Shwartsman 		conn->qp.active = false;
271*e9dcd831SSlava Shwartsman 		kfree(buf);
272*e9dcd831SSlava Shwartsman 		return;
273*e9dcd831SSlava Shwartsman 	}
274*e9dcd831SSlava Shwartsman 
275*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
276*e9dcd831SSlava Shwartsman 		      buf->sg[0].size);
277*e9dcd831SSlava Shwartsman 	conn->recv_cb(conn->cb_arg, buf);
278*e9dcd831SSlava Shwartsman 
279*e9dcd831SSlava Shwartsman 	buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
280*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_post_recv(conn, buf);
281*e9dcd831SSlava Shwartsman 	if (unlikely(err)) {
282*e9dcd831SSlava Shwartsman 		mlx5_fpga_warn(conn->fdev,
283*e9dcd831SSlava Shwartsman 			       "Failed to re-post recv buf: %d\n", err);
284*e9dcd831SSlava Shwartsman 		kfree(buf);
285*e9dcd831SSlava Shwartsman 	}
286*e9dcd831SSlava Shwartsman }
287*e9dcd831SSlava Shwartsman 
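/*
 * Send completion: release the SQ slot and, while still holding the SQ
 * lock, post the next backlogged buffer (if any) to keep ordering.  The
 * buffer's completion callback runs after the lock is dropped.
 */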
288*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn,
289*e9dcd831SSlava Shwartsman 				  struct mlx5_cqe64 *cqe, u8 status)
290*e9dcd831SSlava Shwartsman {
291*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_dma_buf *buf, *nextbuf;
292*e9dcd831SSlava Shwartsman 	unsigned long flags;
293*e9dcd831SSlava Shwartsman 	int ix;
294*e9dcd831SSlava Shwartsman 
295*e9dcd831SSlava Shwartsman 	spin_lock_irqsave(&conn->qp.sq.lock, flags);
296*e9dcd831SSlava Shwartsman 
297*e9dcd831SSlava Shwartsman 	ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
298*e9dcd831SSlava Shwartsman 	buf = conn->qp.sq.bufs[ix];
299*e9dcd831SSlava Shwartsman 	conn->qp.sq.bufs[ix] = NULL;
300*e9dcd831SSlava Shwartsman 	conn->qp.sq.cc++;
301*e9dcd831SSlava Shwartsman 
302*e9dcd831SSlava Shwartsman 	/* Handle backlog still under the spinlock to ensure message post order */
303*e9dcd831SSlava Shwartsman 	if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
304*e9dcd831SSlava Shwartsman 		if (likely(conn->qp.active)) {
305*e9dcd831SSlava Shwartsman 			nextbuf = list_first_entry(&conn->qp.sq.backlog,
306*e9dcd831SSlava Shwartsman 						   struct mlx5_fpga_dma_buf, list);
307*e9dcd831SSlava Shwartsman 			list_del(&nextbuf->list);
308*e9dcd831SSlava Shwartsman 			mlx5_fpga_conn_post_send(conn, nextbuf);
309*e9dcd831SSlava Shwartsman 		}
310*e9dcd831SSlava Shwartsman 	}
311*e9dcd831SSlava Shwartsman 
312*e9dcd831SSlava Shwartsman 	spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
313*e9dcd831SSlava Shwartsman 
314*e9dcd831SSlava Shwartsman 	if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
315*e9dcd831SSlava Shwartsman 		mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
316*e9dcd831SSlava Shwartsman 			       buf, conn->fpga_qpn, status);
317*e9dcd831SSlava Shwartsman 	else
318*e9dcd831SSlava Shwartsman 		mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
319*e9dcd831SSlava Shwartsman 			      buf, conn->fpga_qpn, status);
320*e9dcd831SSlava Shwartsman 
321*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_unmap_buf(conn, buf);
322*e9dcd831SSlava Shwartsman 
323*e9dcd831SSlava Shwartsman 	if (likely(buf->complete))
324*e9dcd831SSlava Shwartsman 		buf->complete(conn, conn->fdev, buf, status);
325*e9dcd831SSlava Shwartsman 
326*e9dcd831SSlava Shwartsman 	if (unlikely(status))
327*e9dcd831SSlava Shwartsman 		conn->qp.active = false;
328*e9dcd831SSlava Shwartsman }
329*e9dcd831SSlava Shwartsman 
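/*
 * Dispatch a CQE by opcode: requester (SQ) versus responder (RQ)
 * completions, with the error variants extracting the syndrome first and
 * falling through to the same handlers.
 */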
330*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
331*e9dcd831SSlava Shwartsman 				      struct mlx5_cqe64 *cqe)
332*e9dcd831SSlava Shwartsman {
333*e9dcd831SSlava Shwartsman 	u8 opcode, status = 0;
334*e9dcd831SSlava Shwartsman 
335*e9dcd831SSlava Shwartsman 	opcode = cqe->op_own >> 4;
336*e9dcd831SSlava Shwartsman 
337*e9dcd831SSlava Shwartsman 	switch (opcode) {
338*e9dcd831SSlava Shwartsman 	case MLX5_CQE_REQ_ERR:
339*e9dcd831SSlava Shwartsman 		status = ((struct mlx5_err_cqe *)cqe)->syndrome;
340*e9dcd831SSlava Shwartsman 		/* Fall through */
341*e9dcd831SSlava Shwartsman 	case MLX5_CQE_REQ:
342*e9dcd831SSlava Shwartsman 		mlx5_fpga_conn_sq_cqe(conn, cqe, status);
343*e9dcd831SSlava Shwartsman 		break;
344*e9dcd831SSlava Shwartsman 
345*e9dcd831SSlava Shwartsman 	case MLX5_CQE_RESP_ERR:
346*e9dcd831SSlava Shwartsman 		status = ((struct mlx5_err_cqe *)cqe)->syndrome;
347*e9dcd831SSlava Shwartsman 		/* Fall through */
348*e9dcd831SSlava Shwartsman 	case MLX5_CQE_RESP_SEND:
349*e9dcd831SSlava Shwartsman 		mlx5_fpga_conn_rq_cqe(conn, cqe, status);
350*e9dcd831SSlava Shwartsman 		break;
351*e9dcd831SSlava Shwartsman 	default:
352*e9dcd831SSlava Shwartsman 		mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n",
353*e9dcd831SSlava Shwartsman 			       opcode);
354*e9dcd831SSlava Shwartsman 	}
355*e9dcd831SSlava Shwartsman }
356*e9dcd831SSlava Shwartsman 
357*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
358*e9dcd831SSlava Shwartsman {
359*e9dcd831SSlava Shwartsman 	mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
360*e9dcd831SSlava Shwartsman 		    conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
361*e9dcd831SSlava Shwartsman }
362*e9dcd831SSlava Shwartsman 
363*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
364*e9dcd831SSlava Shwartsman 				    enum mlx5_event event)
365*e9dcd831SSlava Shwartsman {
366*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_conn *conn;
367*e9dcd831SSlava Shwartsman 
368*e9dcd831SSlava Shwartsman 	conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
369*e9dcd831SSlava Shwartsman 	mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
370*e9dcd831SSlava Shwartsman }
371*e9dcd831SSlava Shwartsman 
372*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
373*e9dcd831SSlava Shwartsman {
374*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_conn *conn;
375*e9dcd831SSlava Shwartsman 
376*e9dcd831SSlava Shwartsman 	conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
377*e9dcd831SSlava Shwartsman 	mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
378*e9dcd831SSlava Shwartsman }
379*e9dcd831SSlava Shwartsman 
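/*
 * Poll up to 'budget' CQEs.  If the budget is exhausted the tasklet is
 * rescheduled to continue polling; otherwise the CQ is re-armed so the
 * next completion raises an interrupt again.
 */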
380*e9dcd831SSlava Shwartsman static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
381*e9dcd831SSlava Shwartsman 				       unsigned int budget)
382*e9dcd831SSlava Shwartsman {
383*e9dcd831SSlava Shwartsman 	struct mlx5_cqe64 *cqe;
384*e9dcd831SSlava Shwartsman 
385*e9dcd831SSlava Shwartsman 	while (budget) {
386*e9dcd831SSlava Shwartsman 		cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
387*e9dcd831SSlava Shwartsman 		if (!cqe)
388*e9dcd831SSlava Shwartsman 			break;
389*e9dcd831SSlava Shwartsman 
390*e9dcd831SSlava Shwartsman 		budget--;
391*e9dcd831SSlava Shwartsman 		mlx5_cqwq_pop(&conn->cq.wq);
392*e9dcd831SSlava Shwartsman 		mlx5_fpga_conn_handle_cqe(conn, cqe);
393*e9dcd831SSlava Shwartsman 		mlx5_cqwq_update_db_record(&conn->cq.wq);
394*e9dcd831SSlava Shwartsman 	}
395*e9dcd831SSlava Shwartsman 	if (!budget) {
396*e9dcd831SSlava Shwartsman 		tasklet_schedule(&conn->cq.tasklet);
397*e9dcd831SSlava Shwartsman 		return;
398*e9dcd831SSlava Shwartsman 	}
399*e9dcd831SSlava Shwartsman 
400*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
401*e9dcd831SSlava Shwartsman 	/* ensure cq space is freed before enabling more cqes */
402*e9dcd831SSlava Shwartsman 	wmb();
403*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_arm_cq(conn);
404*e9dcd831SSlava Shwartsman }
405*e9dcd831SSlava Shwartsman 
406*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
407*e9dcd831SSlava Shwartsman {
408*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_conn *conn = (void *)data;
409*e9dcd831SSlava Shwartsman 
410*e9dcd831SSlava Shwartsman 	if (unlikely(!conn->qp.active))
411*e9dcd831SSlava Shwartsman 		return;
412*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
413*e9dcd831SSlava Shwartsman }
414*e9dcd831SSlava Shwartsman 
415*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)
416*e9dcd831SSlava Shwartsman {
417*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_conn *conn;
418*e9dcd831SSlava Shwartsman 
419*e9dcd831SSlava Shwartsman 	conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
420*e9dcd831SSlava Shwartsman 	if (unlikely(!conn->qp.active))
421*e9dcd831SSlava Shwartsman 		return;
422*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
423*e9dcd831SSlava Shwartsman }
424*e9dcd831SSlava Shwartsman 
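/*
 * Create the completion queue shared by the SQ and RQ: allocate the CQ
 * work queue, initialize every CQE as invalid/hardware-owned, bind the CQ
 * to an EQ and the connection UAR, and set up the completion tasklet.
 */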
425*e9dcd831SSlava Shwartsman static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
426*e9dcd831SSlava Shwartsman {
427*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_device *fdev = conn->fdev;
428*e9dcd831SSlava Shwartsman 	struct mlx5_core_dev *mdev = fdev->mdev;
429*e9dcd831SSlava Shwartsman 	u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
430*e9dcd831SSlava Shwartsman 	struct mlx5_wq_param wqp;
431*e9dcd831SSlava Shwartsman 	struct mlx5_cqe64 *cqe;
432*e9dcd831SSlava Shwartsman 	int inlen, err, eqn;
433*e9dcd831SSlava Shwartsman 	unsigned int irqn;
434*e9dcd831SSlava Shwartsman 	void *cqc, *in;
435*e9dcd831SSlava Shwartsman 	__be64 *pas;
436*e9dcd831SSlava Shwartsman 	u32 i;
437*e9dcd831SSlava Shwartsman 
438*e9dcd831SSlava Shwartsman 	cq_size = roundup_pow_of_two(cq_size);
439*e9dcd831SSlava Shwartsman 	MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
440*e9dcd831SSlava Shwartsman 
441*e9dcd831SSlava Shwartsman 	wqp.buf_numa_node = mdev->priv.numa_node;
442*e9dcd831SSlava Shwartsman 	wqp.db_numa_node  = mdev->priv.numa_node;
443*e9dcd831SSlava Shwartsman 
444*e9dcd831SSlava Shwartsman 	err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq,
445*e9dcd831SSlava Shwartsman 			       &conn->cq.wq_ctrl);
446*e9dcd831SSlava Shwartsman 	if (err)
447*e9dcd831SSlava Shwartsman 		return err;
448*e9dcd831SSlava Shwartsman 
449*e9dcd831SSlava Shwartsman 	for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) {
450*e9dcd831SSlava Shwartsman 		cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i);
451*e9dcd831SSlava Shwartsman 		cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
452*e9dcd831SSlava Shwartsman 	}
453*e9dcd831SSlava Shwartsman 
454*e9dcd831SSlava Shwartsman 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
455*e9dcd831SSlava Shwartsman 		sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages;
456*e9dcd831SSlava Shwartsman 	in = kvzalloc(inlen, GFP_KERNEL);
457*e9dcd831SSlava Shwartsman 	if (!in) {
458*e9dcd831SSlava Shwartsman 		err = -ENOMEM;
459*e9dcd831SSlava Shwartsman 		goto err_cqwq;
460*e9dcd831SSlava Shwartsman 	}
461*e9dcd831SSlava Shwartsman 
462*e9dcd831SSlava Shwartsman 	err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
463*e9dcd831SSlava Shwartsman 	if (err)
464*e9dcd831SSlava Shwartsman 		goto err_cqwq;
465*e9dcd831SSlava Shwartsman 
466*e9dcd831SSlava Shwartsman 	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
467*e9dcd831SSlava Shwartsman 	MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
468*e9dcd831SSlava Shwartsman 	MLX5_SET(cqc, cqc, c_eqn, eqn);
469*e9dcd831SSlava Shwartsman 	MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
470*e9dcd831SSlava Shwartsman 	MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift -
471*e9dcd831SSlava Shwartsman 			   MLX5_ADAPTER_PAGE_SHIFT);
472*e9dcd831SSlava Shwartsman 	MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);
473*e9dcd831SSlava Shwartsman 
474*e9dcd831SSlava Shwartsman 	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
475*e9dcd831SSlava Shwartsman 	mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas);
476*e9dcd831SSlava Shwartsman 
477*e9dcd831SSlava Shwartsman 	err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
478*e9dcd831SSlava Shwartsman 	kvfree(in);
479*e9dcd831SSlava Shwartsman 
480*e9dcd831SSlava Shwartsman 	if (err)
481*e9dcd831SSlava Shwartsman 		goto err_cqwq;
482*e9dcd831SSlava Shwartsman 
483*e9dcd831SSlava Shwartsman 	conn->cq.mcq.cqe_sz     = 64;
484*e9dcd831SSlava Shwartsman 	conn->cq.mcq.set_ci_db  = conn->cq.wq_ctrl.db.db;
485*e9dcd831SSlava Shwartsman 	conn->cq.mcq.arm_db     = conn->cq.wq_ctrl.db.db + 1;
486*e9dcd831SSlava Shwartsman 	*conn->cq.mcq.set_ci_db = 0;
487*e9dcd831SSlava Shwartsman 	*conn->cq.mcq.arm_db    = 0;
488*e9dcd831SSlava Shwartsman 	conn->cq.mcq.vector     = 0;
489*e9dcd831SSlava Shwartsman 	conn->cq.mcq.comp       = mlx5_fpga_conn_cq_complete;
490*e9dcd831SSlava Shwartsman 	conn->cq.mcq.event      = mlx5_fpga_conn_cq_event;
491*e9dcd831SSlava Shwartsman 	conn->cq.mcq.irqn       = irqn;
492*e9dcd831SSlava Shwartsman 	conn->cq.mcq.uar        = fdev->conn_res.uar;
493*e9dcd831SSlava Shwartsman 	tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
494*e9dcd831SSlava Shwartsman 		     (unsigned long)conn);
495*e9dcd831SSlava Shwartsman 
496*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
497*e9dcd831SSlava Shwartsman 
498*e9dcd831SSlava Shwartsman 	goto out;
499*e9dcd831SSlava Shwartsman 
500*e9dcd831SSlava Shwartsman err_cqwq:
501*e9dcd831SSlava Shwartsman 	mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
502*e9dcd831SSlava Shwartsman out:
503*e9dcd831SSlava Shwartsman 	return err;
504*e9dcd831SSlava Shwartsman }
505*e9dcd831SSlava Shwartsman 
506*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
507*e9dcd831SSlava Shwartsman {
508*e9dcd831SSlava Shwartsman 	tasklet_disable(&conn->cq.tasklet);
509*e9dcd831SSlava Shwartsman 	tasklet_kill(&conn->cq.tasklet);
510*e9dcd831SSlava Shwartsman 	mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
511*e9dcd831SSlava Shwartsman 	mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
512*e9dcd831SSlava Shwartsman }
513*e9dcd831SSlava Shwartsman 
514*e9dcd831SSlava Shwartsman static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
515*e9dcd831SSlava Shwartsman {
516*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_device *fdev = conn->fdev;
517*e9dcd831SSlava Shwartsman 	struct mlx5_core_dev *mdev = fdev->mdev;
518*e9dcd831SSlava Shwartsman 	struct mlx5_wq_param wqp;
519*e9dcd831SSlava Shwartsman 
520*e9dcd831SSlava Shwartsman 	wqp.buf_numa_node = mdev->priv.numa_node;
521*e9dcd831SSlava Shwartsman 	wqp.db_numa_node  = mdev->priv.numa_node;
522*e9dcd831SSlava Shwartsman 
523*e9dcd831SSlava Shwartsman 	return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq,
524*e9dcd831SSlava Shwartsman 				 &conn->qp.wq_ctrl);
525*e9dcd831SSlava Shwartsman }
526*e9dcd831SSlava Shwartsman 
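/*
 * Create the host side of the RC QP: size the SQ/RQ (rounded up to powers
 * of two), allocate the work queue and per-slot buffer arrays, and issue
 * CREATE_QP with both send and receive completions mapped to the single CQ.
 */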
527*e9dcd831SSlava Shwartsman static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
528*e9dcd831SSlava Shwartsman 				    unsigned int tx_size, unsigned int rx_size)
529*e9dcd831SSlava Shwartsman {
530*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_device *fdev = conn->fdev;
531*e9dcd831SSlava Shwartsman 	struct mlx5_core_dev *mdev = fdev->mdev;
532*e9dcd831SSlava Shwartsman 	u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0};
533*e9dcd831SSlava Shwartsman 	void *in = NULL, *qpc;
534*e9dcd831SSlava Shwartsman 	int err, inlen;
535*e9dcd831SSlava Shwartsman 
536*e9dcd831SSlava Shwartsman 	conn->qp.rq.pc = 0;
537*e9dcd831SSlava Shwartsman 	conn->qp.rq.cc = 0;
538*e9dcd831SSlava Shwartsman 	conn->qp.rq.size = roundup_pow_of_two(rx_size);
539*e9dcd831SSlava Shwartsman 	conn->qp.sq.pc = 0;
540*e9dcd831SSlava Shwartsman 	conn->qp.sq.cc = 0;
541*e9dcd831SSlava Shwartsman 	conn->qp.sq.size = roundup_pow_of_two(tx_size);
542*e9dcd831SSlava Shwartsman 
543*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
544*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
545*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
546*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_create_wq(conn, temp_qpc);
547*e9dcd831SSlava Shwartsman 	if (err)
548*e9dcd831SSlava Shwartsman 		goto out;
549*e9dcd831SSlava Shwartsman 
550*e9dcd831SSlava Shwartsman 	conn->qp.rq.bufs = kvzalloc(sizeof(conn->qp.rq.bufs[0]) *
551*e9dcd831SSlava Shwartsman 				    conn->qp.rq.size, GFP_KERNEL);
552*e9dcd831SSlava Shwartsman 	if (!conn->qp.rq.bufs) {
553*e9dcd831SSlava Shwartsman 		err = -ENOMEM;
554*e9dcd831SSlava Shwartsman 		goto err_wq;
555*e9dcd831SSlava Shwartsman 	}
556*e9dcd831SSlava Shwartsman 
557*e9dcd831SSlava Shwartsman 	conn->qp.sq.bufs = kvzalloc(sizeof(conn->qp.sq.bufs[0]) *
558*e9dcd831SSlava Shwartsman 				    conn->qp.sq.size, GFP_KERNEL);
559*e9dcd831SSlava Shwartsman 	if (!conn->qp.sq.bufs) {
560*e9dcd831SSlava Shwartsman 		err = -ENOMEM;
561*e9dcd831SSlava Shwartsman 		goto err_rq_bufs;
562*e9dcd831SSlava Shwartsman 	}
563*e9dcd831SSlava Shwartsman 
564*e9dcd831SSlava Shwartsman 	inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
565*e9dcd831SSlava Shwartsman 		MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
566*e9dcd831SSlava Shwartsman 		conn->qp.wq_ctrl.buf.npages;
567*e9dcd831SSlava Shwartsman 	in = kvzalloc(inlen, GFP_KERNEL);
568*e9dcd831SSlava Shwartsman 	if (!in) {
569*e9dcd831SSlava Shwartsman 		err = -ENOMEM;
570*e9dcd831SSlava Shwartsman 		goto err_sq_bufs;
571*e9dcd831SSlava Shwartsman 	}
572*e9dcd831SSlava Shwartsman 
573*e9dcd831SSlava Shwartsman 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
574*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index);
575*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, log_page_size,
576*e9dcd831SSlava Shwartsman 		 conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
577*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, fre, 1);
578*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, rlky, 1);
579*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
580*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
581*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn);
582*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
583*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
584*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
585*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
586*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
587*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
588*e9dcd831SSlava Shwartsman 	MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
589*e9dcd831SSlava Shwartsman 	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
590*e9dcd831SSlava Shwartsman 		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
591*e9dcd831SSlava Shwartsman 
592*e9dcd831SSlava Shwartsman 	mlx5_fill_page_array(&conn->qp.wq_ctrl.buf,
593*e9dcd831SSlava Shwartsman 			     (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
594*e9dcd831SSlava Shwartsman 
595*e9dcd831SSlava Shwartsman 	err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
596*e9dcd831SSlava Shwartsman 	if (err)
597*e9dcd831SSlava Shwartsman 		goto err_sq_bufs;
598*e9dcd831SSlava Shwartsman 
599*e9dcd831SSlava Shwartsman 	conn->qp.mqp.event = mlx5_fpga_conn_event;
600*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);
601*e9dcd831SSlava Shwartsman 
602*e9dcd831SSlava Shwartsman 	goto out;
603*e9dcd831SSlava Shwartsman 
604*e9dcd831SSlava Shwartsman err_sq_bufs:
605*e9dcd831SSlava Shwartsman 	kvfree(conn->qp.sq.bufs);
606*e9dcd831SSlava Shwartsman err_rq_bufs:
607*e9dcd831SSlava Shwartsman 	kvfree(conn->qp.rq.bufs);
608*e9dcd831SSlava Shwartsman err_wq:
609*e9dcd831SSlava Shwartsman 	mlx5_wq_destroy(&conn->qp.wq_ctrl);
610*e9dcd831SSlava Shwartsman out:
611*e9dcd831SSlava Shwartsman 	kvfree(in);
612*e9dcd831SSlava Shwartsman 	return err;
613*e9dcd831SSlava Shwartsman }
614*e9dcd831SSlava Shwartsman 
615*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn)
616*e9dcd831SSlava Shwartsman {
617*e9dcd831SSlava Shwartsman 	int ix;
618*e9dcd831SSlava Shwartsman 
619*e9dcd831SSlava Shwartsman 	for (ix = 0; ix < conn->qp.rq.size; ix++) {
620*e9dcd831SSlava Shwartsman 		if (!conn->qp.rq.bufs[ix])
621*e9dcd831SSlava Shwartsman 			continue;
622*e9dcd831SSlava Shwartsman 		mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
623*e9dcd831SSlava Shwartsman 		kfree(conn->qp.rq.bufs[ix]);
624*e9dcd831SSlava Shwartsman 		conn->qp.rq.bufs[ix] = NULL;
625*e9dcd831SSlava Shwartsman 	}
626*e9dcd831SSlava Shwartsman }
627*e9dcd831SSlava Shwartsman 
628*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
629*e9dcd831SSlava Shwartsman {
630*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_dma_buf *buf, *temp;
631*e9dcd831SSlava Shwartsman 	int ix;
632*e9dcd831SSlava Shwartsman 
633*e9dcd831SSlava Shwartsman 	for (ix = 0; ix < conn->qp.sq.size; ix++) {
634*e9dcd831SSlava Shwartsman 		buf = conn->qp.sq.bufs[ix];
635*e9dcd831SSlava Shwartsman 		if (!buf)
636*e9dcd831SSlava Shwartsman 			continue;
637*e9dcd831SSlava Shwartsman 		conn->qp.sq.bufs[ix] = NULL;
638*e9dcd831SSlava Shwartsman 		mlx5_fpga_conn_unmap_buf(conn, buf);
639*e9dcd831SSlava Shwartsman 		if (!buf->complete)
640*e9dcd831SSlava Shwartsman 			continue;
641*e9dcd831SSlava Shwartsman 		buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
642*e9dcd831SSlava Shwartsman 	}
643*e9dcd831SSlava Shwartsman 	list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
644*e9dcd831SSlava Shwartsman 		mlx5_fpga_conn_unmap_buf(conn, buf);
645*e9dcd831SSlava Shwartsman 		if (!buf->complete)
646*e9dcd831SSlava Shwartsman 			continue;
647*e9dcd831SSlava Shwartsman 		buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
648*e9dcd831SSlava Shwartsman 	}
649*e9dcd831SSlava Shwartsman }
650*e9dcd831SSlava Shwartsman 
651*e9dcd831SSlava Shwartsman static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
652*e9dcd831SSlava Shwartsman {
653*e9dcd831SSlava Shwartsman 	mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
654*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_free_recv_bufs(conn);
655*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_flush_send_bufs(conn);
656*e9dcd831SSlava Shwartsman 	kvfree(conn->qp.sq.bufs);
657*e9dcd831SSlava Shwartsman 	kvfree(conn->qp.rq.bufs);
658*e9dcd831SSlava Shwartsman 	mlx5_wq_destroy(&conn->qp.wq_ctrl);
659*e9dcd831SSlava Shwartsman }
660*e9dcd831SSlava Shwartsman 
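/*
 * The four helpers below drive the host QP through the usual RC state
 * machine (RESET -> INIT -> RTR -> RTS).  The RTR attributes (remote QPN,
 * MAC, IP and PSN) come from the FPGA QP context, pairing the host and
 * FPGA endpoints over RoCE v2.
 */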
661*e9dcd831SSlava Shwartsman static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
662*e9dcd831SSlava Shwartsman {
663*e9dcd831SSlava Shwartsman 	struct mlx5_core_dev *mdev = conn->fdev->mdev;
664*e9dcd831SSlava Shwartsman 
665*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);
666*e9dcd831SSlava Shwartsman 
667*e9dcd831SSlava Shwartsman 	return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
668*e9dcd831SSlava Shwartsman 				   &conn->qp.mqp);
669*e9dcd831SSlava Shwartsman }
670*e9dcd831SSlava Shwartsman 
671*e9dcd831SSlava Shwartsman static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
672*e9dcd831SSlava Shwartsman {
673*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_device *fdev = conn->fdev;
674*e9dcd831SSlava Shwartsman 	struct mlx5_core_dev *mdev = fdev->mdev;
675*e9dcd831SSlava Shwartsman 	u32 *qpc = NULL;
676*e9dcd831SSlava Shwartsman 	int err;
677*e9dcd831SSlava Shwartsman 
678*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);
679*e9dcd831SSlava Shwartsman 
680*e9dcd831SSlava Shwartsman 	qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
681*e9dcd831SSlava Shwartsman 	if (!qpc) {
682*e9dcd831SSlava Shwartsman 		err = -ENOMEM;
683*e9dcd831SSlava Shwartsman 		goto out;
684*e9dcd831SSlava Shwartsman 	}
685*e9dcd831SSlava Shwartsman 
686*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
687*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
688*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
689*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
690*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
691*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
692*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
693*e9dcd831SSlava Shwartsman 	MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
694*e9dcd831SSlava Shwartsman 
695*e9dcd831SSlava Shwartsman 	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
696*e9dcd831SSlava Shwartsman 				  &conn->qp.mqp);
697*e9dcd831SSlava Shwartsman 	if (err) {
698*e9dcd831SSlava Shwartsman 		mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
699*e9dcd831SSlava Shwartsman 		goto out;
700*e9dcd831SSlava Shwartsman 	}
701*e9dcd831SSlava Shwartsman 
702*e9dcd831SSlava Shwartsman out:
703*e9dcd831SSlava Shwartsman 	kfree(qpc);
704*e9dcd831SSlava Shwartsman 	return err;
705*e9dcd831SSlava Shwartsman }
706*e9dcd831SSlava Shwartsman 
707*e9dcd831SSlava Shwartsman static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
708*e9dcd831SSlava Shwartsman {
709*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_device *fdev = conn->fdev;
710*e9dcd831SSlava Shwartsman 	struct mlx5_core_dev *mdev = fdev->mdev;
711*e9dcd831SSlava Shwartsman 	u32 *qpc = NULL;
712*e9dcd831SSlava Shwartsman 	int err;
713*e9dcd831SSlava Shwartsman 
714*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(conn->fdev, "QP RTR\n");
715*e9dcd831SSlava Shwartsman 
716*e9dcd831SSlava Shwartsman 	qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
717*e9dcd831SSlava Shwartsman 	if (!qpc) {
718*e9dcd831SSlava Shwartsman 		err = -ENOMEM;
719*e9dcd831SSlava Shwartsman 		goto out;
720*e9dcd831SSlava Shwartsman 	}
721*e9dcd831SSlava Shwartsman 
722*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
723*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
724*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn);
725*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, next_rcv_psn,
726*e9dcd831SSlava Shwartsman 		 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
727*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
728*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
729*e9dcd831SSlava Shwartsman 	ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
730*e9dcd831SSlava Shwartsman 			MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
731*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
732*e9dcd831SSlava Shwartsman 		 MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port));
733*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
734*e9dcd831SSlava Shwartsman 		 conn->qp.sgid_index);
735*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0);
736*e9dcd831SSlava Shwartsman 	memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
737*e9dcd831SSlava Shwartsman 	       MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
738*e9dcd831SSlava Shwartsman 	       MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));
739*e9dcd831SSlava Shwartsman 
740*e9dcd831SSlava Shwartsman 	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
741*e9dcd831SSlava Shwartsman 				  &conn->qp.mqp);
742*e9dcd831SSlava Shwartsman 	if (err) {
743*e9dcd831SSlava Shwartsman 		mlx5_fpga_warn(fdev, "qp_modify INIT2RTR failed: %d\n", err);
744*e9dcd831SSlava Shwartsman 		goto out;
745*e9dcd831SSlava Shwartsman 	}
746*e9dcd831SSlava Shwartsman 
747*e9dcd831SSlava Shwartsman out:
748*e9dcd831SSlava Shwartsman 	kfree(qpc);
749*e9dcd831SSlava Shwartsman 	return err;
750*e9dcd831SSlava Shwartsman }
751*e9dcd831SSlava Shwartsman 
752*e9dcd831SSlava Shwartsman static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
753*e9dcd831SSlava Shwartsman {
754*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_device *fdev = conn->fdev;
755*e9dcd831SSlava Shwartsman 	struct mlx5_core_dev *mdev = fdev->mdev;
756*e9dcd831SSlava Shwartsman 	u32 *qpc = NULL;
757*e9dcd831SSlava Shwartsman 	u32 opt_mask;
758*e9dcd831SSlava Shwartsman 	int err;
759*e9dcd831SSlava Shwartsman 
760*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(conn->fdev, "QP RTS\n");
761*e9dcd831SSlava Shwartsman 
762*e9dcd831SSlava Shwartsman 	qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
763*e9dcd831SSlava Shwartsman 	if (!qpc) {
764*e9dcd831SSlava Shwartsman 		err = -ENOMEM;
765*e9dcd831SSlava Shwartsman 		goto out;
766*e9dcd831SSlava Shwartsman 	}
767*e9dcd831SSlava Shwartsman 
768*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
769*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
770*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */
771*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, next_send_psn,
772*e9dcd831SSlava Shwartsman 		 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn));
773*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, retry_count, 7);
774*e9dcd831SSlava Shwartsman 	MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
775*e9dcd831SSlava Shwartsman 
776*e9dcd831SSlava Shwartsman 	opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
777*e9dcd831SSlava Shwartsman 	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
778*e9dcd831SSlava Shwartsman 				  &conn->qp.mqp);
779*e9dcd831SSlava Shwartsman 	if (err) {
780*e9dcd831SSlava Shwartsman 		mlx5_fpga_warn(fdev, "qp_modify RTR2RTS failed: %d\n", err);
781*e9dcd831SSlava Shwartsman 		goto out;
782*e9dcd831SSlava Shwartsman 	}
783*e9dcd831SSlava Shwartsman 
784*e9dcd831SSlava Shwartsman out:
785*e9dcd831SSlava Shwartsman 	kfree(qpc);
786*e9dcd831SSlava Shwartsman 	return err;
787*e9dcd831SSlava Shwartsman }
788*e9dcd831SSlava Shwartsman 
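/*
 * Bring the connection up: activate the FPGA-side QP, walk the host QP
 * from RESET to RTS, and pre-post receive buffers while the QP is still in
 * INIT.  On failure the FPGA QP is moved back to the INIT state.
 */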
789*e9dcd831SSlava Shwartsman static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
790*e9dcd831SSlava Shwartsman {
791*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_device *fdev = conn->fdev;
792*e9dcd831SSlava Shwartsman 	int err;
793*e9dcd831SSlava Shwartsman 
794*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE);
795*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
796*e9dcd831SSlava Shwartsman 				  MLX5_FPGA_QPC_STATE, &conn->fpga_qpc);
797*e9dcd831SSlava Shwartsman 	if (err) {
798*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to activate FPGA RC QP: %d\n", err);
799*e9dcd831SSlava Shwartsman 		goto out;
800*e9dcd831SSlava Shwartsman 	}
801*e9dcd831SSlava Shwartsman 
802*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_reset_qp(conn);
803*e9dcd831SSlava Shwartsman 	if (err) {
804*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to change QP state to reset\n");
805*e9dcd831SSlava Shwartsman 		goto err_fpga_qp;
806*e9dcd831SSlava Shwartsman 	}
807*e9dcd831SSlava Shwartsman 
808*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_init_qp(conn);
809*e9dcd831SSlava Shwartsman 	if (err) {
810*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n");
811*e9dcd831SSlava Shwartsman 		goto err_fpga_qp;
812*e9dcd831SSlava Shwartsman 	}
813*e9dcd831SSlava Shwartsman 	conn->qp.active = true;
814*e9dcd831SSlava Shwartsman 
815*e9dcd831SSlava Shwartsman 	while (!mlx5_fpga_conn_post_recv_buf(conn))
816*e9dcd831SSlava Shwartsman 		;
817*e9dcd831SSlava Shwartsman 
818*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_rtr_qp(conn);
819*e9dcd831SSlava Shwartsman 	if (err) {
820*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n");
821*e9dcd831SSlava Shwartsman 		goto err_recv_bufs;
822*e9dcd831SSlava Shwartsman 	}
823*e9dcd831SSlava Shwartsman 
824*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_rts_qp(conn);
825*e9dcd831SSlava Shwartsman 	if (err) {
826*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n");
827*e9dcd831SSlava Shwartsman 		goto err_recv_bufs;
828*e9dcd831SSlava Shwartsman 	}
829*e9dcd831SSlava Shwartsman 	goto out;
830*e9dcd831SSlava Shwartsman 
831*e9dcd831SSlava Shwartsman err_recv_bufs:
832*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_free_recv_bufs(conn);
833*e9dcd831SSlava Shwartsman err_fpga_qp:
834*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
835*e9dcd831SSlava Shwartsman 	if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
836*e9dcd831SSlava Shwartsman 				MLX5_FPGA_QPC_STATE, &conn->fpga_qpc))
837*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n");
838*e9dcd831SSlava Shwartsman out:
839*e9dcd831SSlava Shwartsman 	return err;
840*e9dcd831SSlava Shwartsman }
841*e9dcd831SSlava Shwartsman 
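/*
 * Create a connection to the FPGA: derive a link-local IPv6 GID from the
 * local MAC, create the CQ and host QP, create the FPGA-side QP, and then
 * connect the two.  Returns a valid pointer or ERR_PTR() on failure.
 */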
842*e9dcd831SSlava Shwartsman struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
843*e9dcd831SSlava Shwartsman 					     struct mlx5_fpga_conn_attr *attr,
844*e9dcd831SSlava Shwartsman 					     enum mlx5_ifc_fpga_qp_type qp_type)
845*e9dcd831SSlava Shwartsman {
846*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_conn *ret, *conn;
847*e9dcd831SSlava Shwartsman 	u8 *remote_mac, *remote_ip;
848*e9dcd831SSlava Shwartsman 	int err;
849*e9dcd831SSlava Shwartsman 
850*e9dcd831SSlava Shwartsman 	if (!attr->recv_cb)
851*e9dcd831SSlava Shwartsman 		return ERR_PTR(-EINVAL);
852*e9dcd831SSlava Shwartsman 
853*e9dcd831SSlava Shwartsman 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
854*e9dcd831SSlava Shwartsman 	if (!conn)
855*e9dcd831SSlava Shwartsman 		return ERR_PTR(-ENOMEM);
856*e9dcd831SSlava Shwartsman 
857*e9dcd831SSlava Shwartsman 	conn->fdev = fdev;
858*e9dcd831SSlava Shwartsman 	INIT_LIST_HEAD(&conn->qp.sq.backlog);
859*e9dcd831SSlava Shwartsman 
860*e9dcd831SSlava Shwartsman 	spin_lock_init(&conn->qp.sq.lock);
861*e9dcd831SSlava Shwartsman 
862*e9dcd831SSlava Shwartsman 	conn->recv_cb = attr->recv_cb;
863*e9dcd831SSlava Shwartsman 	conn->cb_arg = attr->cb_arg;
864*e9dcd831SSlava Shwartsman 
865*e9dcd831SSlava Shwartsman 	remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
866*e9dcd831SSlava Shwartsman 	err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac);
867*e9dcd831SSlava Shwartsman 	if (err) {
868*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
869*e9dcd831SSlava Shwartsman 		ret = ERR_PTR(err);
870*e9dcd831SSlava Shwartsman 		goto err;
871*e9dcd831SSlava Shwartsman 	}
872*e9dcd831SSlava Shwartsman 
873*e9dcd831SSlava Shwartsman 	/* Build Modified EUI-64 IPv6 address from the MAC address */
874*e9dcd831SSlava Shwartsman 	remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip);
875*e9dcd831SSlava Shwartsman 	remote_ip[0] = 0xfe;
876*e9dcd831SSlava Shwartsman 	remote_ip[1] = 0x80;
877*e9dcd831SSlava Shwartsman 	addrconf_addr_eui48(&remote_ip[8], remote_mac);
878*e9dcd831SSlava Shwartsman 
879*e9dcd831SSlava Shwartsman 	err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index);
880*e9dcd831SSlava Shwartsman 	if (err) {
881*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err);
882*e9dcd831SSlava Shwartsman 		ret = ERR_PTR(err);
883*e9dcd831SSlava Shwartsman 		goto err;
884*e9dcd831SSlava Shwartsman 	}
885*e9dcd831SSlava Shwartsman 
886*e9dcd831SSlava Shwartsman 	err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
887*e9dcd831SSlava Shwartsman 				     MLX5_ROCE_VERSION_2,
888*e9dcd831SSlava Shwartsman 				     MLX5_ROCE_L3_TYPE_IPV6,
889*e9dcd831SSlava Shwartsman 				     remote_ip, remote_mac, true, 0);
890*e9dcd831SSlava Shwartsman 	if (err) {
891*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
892*e9dcd831SSlava Shwartsman 		ret = ERR_PTR(err);
893*e9dcd831SSlava Shwartsman 		goto err_rsvd_gid;
894*e9dcd831SSlava Shwartsman 	}
895*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index);
896*e9dcd831SSlava Shwartsman 
897*e9dcd831SSlava Shwartsman 	/* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe,
898*e9dcd831SSlava Shwartsman 	 * created during processing of the cqe
899*e9dcd831SSlava Shwartsman 	 */
900*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_create_cq(conn,
901*e9dcd831SSlava Shwartsman 				       (attr->tx_size + attr->rx_size) * 2);
902*e9dcd831SSlava Shwartsman 	if (err) {
903*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err);
904*e9dcd831SSlava Shwartsman 		ret = ERR_PTR(err);
905*e9dcd831SSlava Shwartsman 		goto err_gid;
906*e9dcd831SSlava Shwartsman 	}
907*e9dcd831SSlava Shwartsman 
908*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_arm_cq(conn);
909*e9dcd831SSlava Shwartsman 
910*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size);
911*e9dcd831SSlava Shwartsman 	if (err) {
912*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err);
913*e9dcd831SSlava Shwartsman 		ret = ERR_PTR(err);
914*e9dcd831SSlava Shwartsman 		goto err_cq;
915*e9dcd831SSlava Shwartsman 	}
916*e9dcd831SSlava Shwartsman 
917*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
918*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type);
919*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC);
920*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q);
921*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0);
922*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
923*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
924*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
925*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
926*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
927*e9dcd831SSlava Shwartsman 	MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);
928*e9dcd831SSlava Shwartsman 
929*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc,
930*e9dcd831SSlava Shwartsman 				  &conn->fpga_qpn);
931*e9dcd831SSlava Shwartsman 	if (err) {
932*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err);
933*e9dcd831SSlava Shwartsman 		ret = ERR_PTR(err);
934*e9dcd831SSlava Shwartsman 		goto err_qp;
935*e9dcd831SSlava Shwartsman 	}
936*e9dcd831SSlava Shwartsman 
937*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_connect(conn);
938*e9dcd831SSlava Shwartsman 	if (err) {
939*e9dcd831SSlava Shwartsman 		ret = ERR_PTR(err);
940*e9dcd831SSlava Shwartsman 		goto err_conn;
941*e9dcd831SSlava Shwartsman 	}
942*e9dcd831SSlava Shwartsman 
943*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn);
944*e9dcd831SSlava Shwartsman 	ret = conn;
945*e9dcd831SSlava Shwartsman 	goto out;
946*e9dcd831SSlava Shwartsman 
947*e9dcd831SSlava Shwartsman err_conn:
948*e9dcd831SSlava Shwartsman 	mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
949*e9dcd831SSlava Shwartsman err_qp:
950*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_destroy_qp(conn);
951*e9dcd831SSlava Shwartsman err_cq:
952*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_destroy_cq(conn);
953*e9dcd831SSlava Shwartsman err_gid:
954*e9dcd831SSlava Shwartsman 	mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
955*e9dcd831SSlava Shwartsman 			       NULL, false, 0);
956*e9dcd831SSlava Shwartsman err_rsvd_gid:
957*e9dcd831SSlava Shwartsman 	mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
958*e9dcd831SSlava Shwartsman err:
959*e9dcd831SSlava Shwartsman 	kfree(conn);
960*e9dcd831SSlava Shwartsman out:
961*e9dcd831SSlava Shwartsman 	return ret;
962*e9dcd831SSlava Shwartsman }
963*e9dcd831SSlava Shwartsman 
964*e9dcd831SSlava Shwartsman void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
965*e9dcd831SSlava Shwartsman {
966*e9dcd831SSlava Shwartsman 	struct mlx5_fpga_device *fdev = conn->fdev;
967*e9dcd831SSlava Shwartsman 	struct mlx5_core_dev *mdev = fdev->mdev;
968*e9dcd831SSlava Shwartsman 	int err = 0;
969*e9dcd831SSlava Shwartsman 
970*e9dcd831SSlava Shwartsman 	conn->qp.active = false;
971*e9dcd831SSlava Shwartsman 	tasklet_disable(&conn->cq.tasklet);
972*e9dcd831SSlava Shwartsman 	synchronize_irq(conn->cq.mcq.irqn);
973*e9dcd831SSlava Shwartsman 
974*e9dcd831SSlava Shwartsman 	mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
975*e9dcd831SSlava Shwartsman 	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
976*e9dcd831SSlava Shwartsman 				  &conn->qp.mqp);
977*e9dcd831SSlava Shwartsman 	if (err)
978*e9dcd831SSlava Shwartsman 		mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err);
979*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_destroy_qp(conn);
980*e9dcd831SSlava Shwartsman 	mlx5_fpga_conn_destroy_cq(conn);
981*e9dcd831SSlava Shwartsman 
982*e9dcd831SSlava Shwartsman 	mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
983*e9dcd831SSlava Shwartsman 			       NULL, NULL, false, 0);
984*e9dcd831SSlava Shwartsman 	mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
985*e9dcd831SSlava Shwartsman 	kfree(conn);
986*e9dcd831SSlava Shwartsman }
987*e9dcd831SSlava Shwartsman 
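/*
 * Per-device setup shared by all FPGA connections: enable RoCE on the NIC
 * vport and allocate the UAR page, protection domain and physical-address
 * MKey used by the connection queues and data buffers.
 */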
988*e9dcd831SSlava Shwartsman int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
989*e9dcd831SSlava Shwartsman {
990*e9dcd831SSlava Shwartsman 	int err;
991*e9dcd831SSlava Shwartsman 
992*e9dcd831SSlava Shwartsman 	err = mlx5_nic_vport_enable_roce(fdev->mdev);
993*e9dcd831SSlava Shwartsman 	if (err) {
994*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err);
995*e9dcd831SSlava Shwartsman 		goto out;
996*e9dcd831SSlava Shwartsman 	}
997*e9dcd831SSlava Shwartsman 
998*e9dcd831SSlava Shwartsman 	fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev);
999*e9dcd831SSlava Shwartsman 	if (IS_ERR(fdev->conn_res.uar)) {
1000*e9dcd831SSlava Shwartsman 		err = PTR_ERR(fdev->conn_res.uar);
1001*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err);
1002*e9dcd831SSlava Shwartsman 		goto err_roce;
1003*e9dcd831SSlava Shwartsman 	}
1004*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n",
1005*e9dcd831SSlava Shwartsman 		      fdev->conn_res.uar->index);
1006*e9dcd831SSlava Shwartsman 
1007*e9dcd831SSlava Shwartsman 	err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn);
1008*e9dcd831SSlava Shwartsman 	if (err) {
1009*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err);
1010*e9dcd831SSlava Shwartsman 		goto err_uar;
1011*e9dcd831SSlava Shwartsman 	}
1012*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn);
1013*e9dcd831SSlava Shwartsman 
1014*e9dcd831SSlava Shwartsman 	err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn,
1015*e9dcd831SSlava Shwartsman 					 &fdev->conn_res.mkey);
1016*e9dcd831SSlava Shwartsman 	if (err) {
1017*e9dcd831SSlava Shwartsman 		mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
1018*e9dcd831SSlava Shwartsman 		goto err_dealloc_pd;
1019*e9dcd831SSlava Shwartsman 	}
1020*e9dcd831SSlava Shwartsman 	mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key);
1021*e9dcd831SSlava Shwartsman 
1022*e9dcd831SSlava Shwartsman 	return 0;
1023*e9dcd831SSlava Shwartsman 
1024*e9dcd831SSlava Shwartsman err_dealloc_pd:
1025*e9dcd831SSlava Shwartsman 	mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
1026*e9dcd831SSlava Shwartsman err_uar:
1027*e9dcd831SSlava Shwartsman 	mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
1028*e9dcd831SSlava Shwartsman err_roce:
1029*e9dcd831SSlava Shwartsman 	mlx5_nic_vport_disable_roce(fdev->mdev);
1030*e9dcd831SSlava Shwartsman out:
1031*e9dcd831SSlava Shwartsman 	return err;
1032*e9dcd831SSlava Shwartsman }
1033*e9dcd831SSlava Shwartsman 
1034*e9dcd831SSlava Shwartsman void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
1035*e9dcd831SSlava Shwartsman {
1036*e9dcd831SSlava Shwartsman 	mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
1037*e9dcd831SSlava Shwartsman 	mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
1038*e9dcd831SSlava Shwartsman 	mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
1039*e9dcd831SSlava Shwartsman 	mlx5_nic_vport_disable_roce(fdev->mdev);
1040*e9dcd831SSlava Shwartsman }
1041