// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <linux/printk.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/transobj.h>
#include "aso.h"
#include <dev/mlx5/mlx5_core/wq.h>
#include <dev/mlx5/cq.h>

struct mlx5_aso_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	struct mlx5_core_cq        mcq;

	/* control */
	struct mlx5_core_dev      *mdev;
	struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5_aso {
	/* data path */
	u16                        cc;
	u16                        pc;

	struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
	struct mlx5_aso_cq         cq;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem               *uar_map;
	u32                        sqn;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;

} ____cacheline_aligned_in_smp;

static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

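/*
 * Allocate the software side of the ASO CQ: the CQ work queue buffer,
 * the doorbell record and the CQE array.  The CQ is not created in
 * firmware until create_aso_cq().
 */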
static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
			     void *cqc_data, struct mlx5_aso_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	struct mlx5_wq_param param;
	int err;
	u32 i;

	param.linear = 1;
	err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;

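	/*
	 * Initialize every CQE as invalid and hardware-owned so that a
	 * stale entry is never mistaken for a completion before the
	 * hardware has written it.
	 */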
	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

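/*
 * Create the CQ in firmware: copy the caller-provided cqc parameters and
 * hand over the work queue page list and doorbell record address.
 */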
static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int inlen, eqn, irqn_not_used;
	void *in, *cqc;
	int err;

	err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn_not_used);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, 0);
	MLX5_SET(cqc,   cqc, c_eqn, eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}

static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
	mlx5_wq_destroy(&cq->wq_ctrl);
}

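/*
 * Two-step CQ creation: allocate the software resources, then issue the
 * firmware command.  log_cq_size is 1 (two entries), which matches the
 * small (log_wq_sz = 1) ASO send queue created below.
 */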
static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
			      struct mlx5_aso_cq *cq)
{
	void *cqc_data;
	int err;

	cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
	if (!cqc_data)
		return -ENOMEM;

	MLX5_SET(cqc, cqc_data, log_cq_size, 1);
	MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);

	err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
		goto err_out;
	}

	err = create_aso_cq(cq, cqc_data);
	if (err) {
		mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
		goto err_free_cq;
	}

	kvfree(cqc_data);
	return 0;

err_free_cq:
	mlx5_aso_free_cq(cq);
err_out:
	kvfree(cqc_data);
	return err;
}

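/*
 * Allocate the software side of the ASO SQ (cyclic work queue buffer and
 * doorbell record) and map the UAR used to ring the doorbell.
 */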
static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
			     void *sqc_data, struct mlx5_aso *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wq_param param;
	int err;

	sq->uar_map = mdev->priv.uar->map;

	param.linear = 1;
	err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;

	wq->db = &wq->db[MLX5_SND_DBR];

	return 0;
}

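/*
 * Create the SQ in firmware in the RST state, attached to the ASO CQ.
 * The queue is moved to RDY separately by mlx5_aso_set_sq_rdy().
 */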
static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
			 void *sqc_data, struct mlx5_aso *sq)
{
	void *in, *sqc, *wq;
	int inlen, err;
	u8 ts_format;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
	MLX5_SET(sqc,  sqc, cqn, sq->cq.mcq.cqn);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

	ts_format = mlx5_get_sq_default_ts(mdev);
	MLX5_SET(sqc, sqc, ts_format, ts_format);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->priv.uar->index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}

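/* Transition the SQ from the RST state to RDY so it accepts WQEs. */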
static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
{
	void *in, *sqc;
	int inlen, err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	MLX5_SET(modify_sq_in, in, sqn, sqn);
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	err = mlx5_core_modify_sq(mdev, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
				  void *sqc_data, struct mlx5_aso *sq)
{
	int err;

	err = create_aso_sq(mdev, pdn, sqc_data, sq);
	if (err)
		return err;

	err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
	if (err)
		mlx5_core_destroy_sq(mdev, sq->sqn);

	return err;
}

static void mlx5_aso_free_sq(struct mlx5_aso *sq)
{
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
{
	mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
	mlx5_aso_free_sq(sq);
}

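/*
 * Build the default SQ context (cyclic work queue, send WQEBB stride,
 * log_wq_sz = 1, i.e. two basic blocks), allocate the software
 * resources and open the queue in firmware.
 */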
static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
			      u32 pdn, struct mlx5_aso *sq)
{
	void *sqc_data, *wq;
	int err;

	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
	if (!sqc_data)
		return -ENOMEM;

	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, pdn);
	MLX5_SET(wq, wq, log_wq_sz, 1);

	err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
		goto err_out;
	}

	err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
	if (err) {
		mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
		goto err_free_asosq;
	}

	mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);

	kvfree(sqc_data);
	return 0;

err_free_asosq:
	mlx5_aso_free_sq(sq);
err_out:
	kvfree(sqc_data);
	return err;
}

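/*
 * Allocate an ASO context: one completion queue and one send queue on
 * the given protection domain.  Returns an ERR_PTR() on failure.
 */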
struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
{
	int numa_node = dev_to_node(&mdev->pdev->dev);
	struct mlx5_aso *aso;
	int err;

	aso = kzalloc(sizeof(*aso), GFP_KERNEL);
	if (!aso)
		return ERR_PTR(-ENOMEM);

	err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
	if (err)
		goto err_cq;

	err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
	if (err)
		goto err_sq;

	return aso;

err_sq:
	mlx5_aso_destroy_cq(&aso->cq);
err_cq:
	kfree(aso);
	return ERR_PTR(err);
}

void mlx5_aso_destroy(struct mlx5_aso *aso)
{
	mlx5_aso_destroy_sq(aso);
	mlx5_aso_destroy_cq(&aso->cq);
	kfree(aso);
}

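/*
 * Data path helpers.  A typical caller flow is sketched below; the exact
 * WQE layout past the control segment depends on the ASO object type and
 * is filled in by the caller (names in the sketch are illustrative):
 *
 *	aso = mlx5_aso_create(mdev, pdn);
 *	wqe = mlx5_aso_get_wqe(aso);
 *	mlx5_aso_build_wqe(aso, ds_cnt, wqe, obj_id, opc_mode);
 *	... fill the object-specific data segments ...
 *	mlx5_aso_post_wqe(aso, with_data, &wqe->ctrl);
 *	err = mlx5_aso_poll_cq(aso, with_data);
 *	mlx5_aso_destroy(aso);
 *
 * mlx5_aso_build_wqe() fills the WQE control segment: the ACCESS_ASO
 * opcode, the opcode modifier selecting the ASO object type, the current
 * producer index, the SQ number and the target object id.
 */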
void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
			struct mlx5_aso_wqe *aso_wqe,
			u32 obj_id, u32 opc_mode)
{
	struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;

	cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
					     (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
					     MLX5_OPCODE_ACCESS_ASO);
	cseg->qpn_ds     = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
	cseg->fm_ce_se   = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->general_id = cpu_to_be32(obj_id);
}

struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
	struct mlx5_aso_wqe *wqe;
	u16 pi;

	pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
	wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi);
	memset(wqe, 0, sizeof(*wqe));
	return wqe;
}

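/*
 * Post a previously built WQE: advance the producer counter by the WQE
 * size in basic blocks, update the doorbell record and ring the hardware
 * doorbell through the UAR at MLX5_BF_OFFSET.
 */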
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
		       struct mlx5_wqe_ctrl_seg *doorbell_cseg)
{
	doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	wmb();

	if (with_data)
		aso->pc += MLX5_ASO_WQEBBS_DATA;
	else
		aso->pc += MLX5_ASO_WQEBBS;
	*aso->wq.db = cpu_to_be32(aso->pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map + MLX5_BF_OFFSET, NULL);

	/* Ensure doorbell is written on uar_page before poll_cq */
	WRITE_ONCE(doorbell_cseg, NULL);
}

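/*
 * Poll for the completion of a posted ASO WQE.  Returns -ETIMEDOUT when
 * no CQE is available yet.  Error CQEs are logged but still consumed,
 * and the consumer counter is advanced by the size of the completed WQE.
 */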
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data)
{
	struct mlx5_aso_cq *cq = &aso->cq;
	struct mlx5_cqe64 *cqe;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return -ETIMEDOUT;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	mlx5_cqwq_pop(&cq->wq);

	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
		struct mlx5_err_cqe *err_cqe;

		mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
			      get_cqe_opcode(cqe));

		err_cqe = (struct mlx5_err_cqe *)cqe;
		mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
			      err_cqe->vendor_err_synd);
		mlx5_core_err(cq->mdev, "syndrome=%x\n",
			      err_cqe->syndrome);
		print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
			       16, 1, err_cqe,
			       sizeof(*err_cqe), false);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	if (with_data)
		aso->cc += MLX5_ASO_WQEBBS_DATA;
	else
		aso->cc += MLX5_ASO_WQEBBS;

	return 0;
}
429