// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

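/* ASO (Advanced Steering Operations) queue support.  ASO objects are
 * driven by posting work requests on a dedicated send queue (SQ) and
 * polling a matching completion queue (CQ); this file creates, rings
 * and polls that SQ/CQ pair.
 */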
#include <linux/printk.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/transobj.h>
#include "aso.h"
#include <dev/mlx5/mlx5_core/wq.h>
#include <dev/mlx5/cq.h>

struct mlx5_aso_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq           wq;

	/* data path - accessed per napi poll */
	struct mlx5_core_cq        mcq;

	/* control */
	struct mlx5_core_dev      *mdev;
	struct mlx5_wq_ctrl        wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5_aso {
	/* data path */
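	/* cc/pc: consumer (completion) and producer counters, in WQEBB units */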
	u16                        cc;
	u16                        pc;

	struct mlx5_wqe_ctrl_seg  *doorbell_cseg;
	struct mlx5_aso_cq         cq;

	/* read only */
	struct mlx5_wq_cyc         wq;
	void __iomem               *uar_map;
	u32                        sqn;

	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;

} ____cacheline_aligned_in_smp;

static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

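/* Allocate the CQ ring and doorbell and pre-initialize the software CQ state. */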
static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
			     void *cqc_data, struct mlx5_aso_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	struct mlx5_wq_param param;
	int err;
	u32 i;

	param.linear = 1;
	err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

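		/* invalid opcode + HW ownership: a stale entry is never
		 * mistaken for a valid completion on the first pass
		 */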
		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

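/* Execute CREATE_CQ: bind the CQ to the EQ behind vector 0 and hand the
 * page list and doorbell address to firmware.
 */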
static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int inlen, eqn, irqn_not_used;
	void *in, *cqc;
	int err;

	err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn_not_used);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, 0);
	MLX5_SET(cqc,   cqc, c_eqn, eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}

static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
	mlx5_wq_destroy(&cq->wq_ctrl);
}

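/* Build the CQ context and create the CQ.  log_cq_size is 1, i.e. two
 * entries, enough for the one-request-at-a-time use of the ASO SQ.
 */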
static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
			      struct mlx5_aso_cq *cq)
{
	void *cqc_data;
	int err;

	cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
	if (!cqc_data)
		return -ENOMEM;

	MLX5_SET(cqc, cqc_data, log_cq_size, 1);
	MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);

	err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
		goto err_out;
	}

	err = create_aso_cq(cq, cqc_data);
	if (err) {
		mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
		goto err_free_cq;
	}

	kvfree(cqc_data);
	return 0;

err_free_cq:
	mlx5_aso_free_cq(cq);
err_out:
	kvfree(cqc_data);
	return err;
}

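/* Allocate the cyclic SQ ring and point wq->db at the send doorbell record. */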
static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
			     void *sqc_data, struct mlx5_aso *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wq_param param;
	int err;

	sq->uar_map = mdev->priv.uar->map;

	param.linear = 1;
	err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;

	wq->db = &wq->db[MLX5_SND_DBR];

	return 0;
}

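/* Execute CREATE_SQ: a cyclic SQ bound to the ASO CQ, created in RST state. */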
static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
			 void *sqc_data, struct mlx5_aso *sq)
{
	void *in, *sqc, *wq;
	int inlen, err;
	u8 ts_format;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
	MLX5_SET(sqc,  sqc, cqn, sq->cq.mcq.cqn);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

	ts_format = mlx5_get_sq_default_ts(mdev);
	MLX5_SET(sqc, sqc, ts_format, ts_format);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->priv.uar->index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}

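/* Execute MODIFY_SQ to transition the SQ from RST to RDY. */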
static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
{
	void *in, *sqc;
	int inlen, err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	MLX5_SET(modify_sq_in, in, sqn, sqn);
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	err = mlx5_core_modify_sq(mdev, in, inlen);

	kvfree(in);

	return err;
}

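/* Create the SQ and bring it to RDY, destroying it again if that fails. */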
static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
				  void *sqc_data, struct mlx5_aso *sq)
{
	int err;

	err = create_aso_sq(mdev, pdn, sqc_data, sq);
	if (err)
		return err;

	err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
	if (err)
		mlx5_core_destroy_sq(mdev, sq->sqn);

	return err;
}

static void mlx5_aso_free_sq(struct mlx5_aso *sq)
{
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
{
	mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
	mlx5_aso_free_sq(sq);
}

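/* Build the SQ context (WQEBB stride, log_wq_sz 1, protection domain pdn)
 * and bring the SQ up.
 */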
static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
			      u32 pdn, struct mlx5_aso *sq)
{
	void *sqc_data, *wq;
	int err;

	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
	if (!sqc_data)
		return -ENOMEM;

	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, pdn);
	MLX5_SET(wq, wq, log_wq_sz, 1);

	err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
		goto err_out;
	}

	err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
	if (err) {
		mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
		goto err_free_asosq;
	}

	mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);

	kvfree(sqc_data);
	return 0;

err_free_asosq:
	mlx5_aso_free_sq(sq);
err_out:
	kvfree(sqc_data);
	return err;
}

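/* Allocate an ASO instance and bring up its CQ and SQ.  Returns an ERR_PTR
 * on failure.
 */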
struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
{
	int numa_node = dev_to_node(&mdev->pdev->dev);
	struct mlx5_aso *aso;
	int err;

	aso = kzalloc(sizeof(*aso), GFP_KERNEL);
	if (!aso)
		return ERR_PTR(-ENOMEM);

	err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
	if (err)
		goto err_cq;

	err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
	if (err)
		goto err_sq;

	return aso;

err_sq:
	mlx5_aso_destroy_cq(&aso->cq);
err_cq:
	kfree(aso);
	return ERR_PTR(err);
}

void mlx5_aso_destroy(struct mlx5_aso *aso)
{
	mlx5_aso_destroy_sq(aso);
	mlx5_aso_destroy_cq(&aso->cq);
	kfree(aso);
}

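/* Fill the WQE control segment: opcode modifier, producer index, SQ number,
 * data segment count and the target ASO object id.
 */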
void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
			struct mlx5_aso_wqe *aso_wqe,
			u32 obj_id, u32 opc_mode)
{
	struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;

	cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
					     (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
					     MLX5_OPCODE_ACCESS_ASO);
	cseg->qpn_ds     = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
	cseg->fm_ce_se   = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->general_id = cpu_to_be32(obj_id);
}

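/* Return the WQE slot at the current producer index, zeroed for the caller. */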
struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
	struct mlx5_aso_wqe *wqe;
	u16 pi;

	pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
	wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi);
	memset(wqe, 0, sizeof(*wqe));
	return wqe;
}

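/* Advance the producer counter, update the doorbell record and ring the
 * doorbell; the barriers order the WQE write, the record and the ring.
 */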
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
		       struct mlx5_wqe_ctrl_seg *doorbell_cseg)
{
	doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	wmb();

	if (with_data)
		aso->pc += MLX5_ASO_WQEBBS_DATA;
	else
		aso->pc += MLX5_ASO_WQEBBS;
	*aso->wq.db = cpu_to_be32(aso->pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map + MLX5_BF_OFFSET, NULL);

	/* Ensure doorbell is written on uar_page before poll_cq */
	WRITE_ONCE(doorbell_cseg, NULL);
}

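/* Poll a single completion: return -ETIMEDOUT if none is pending, dump
 * error CQEs, then release the CQE and advance the consumer counter.
 */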
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data)
{
	struct mlx5_aso_cq *cq = &aso->cq;
	struct mlx5_cqe64 *cqe;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return -ETIMEDOUT;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	mlx5_cqwq_pop(&cq->wq);

	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
		struct mlx5_err_cqe *err_cqe;

		mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
			      get_cqe_opcode(cqe));

		err_cqe = (struct mlx5_err_cqe *)cqe;
		mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
			      err_cqe->vendor_err_synd);
		mlx5_core_err(cq->mdev, "syndrome=%x\n",
			      err_cqe->syndrome);
		print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
			       16, 1, err_cqe,
			       sizeof(*err_cqe), false);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	if (with_data)
		aso->cc += MLX5_ASO_WQEBBS_DATA;
	else
		aso->cc += MLX5_ASO_WQEBBS;

	return 0;
}
429