xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/wc.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

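/* Write-combining (WC) self test.
 *
 * The test builds a minimal SQ/CQ pair and posts TEST_WC_NUM_WQES NOP
 * WQEs, pushing each one out through a BlueFlame register as a 64-byte
 * MMIO copy that is intended to be emitted as a single write-combined
 * burst.  Only the last WQE written to host memory requests a
 * completion, while every BlueFlame copy does; the wqe_counter of the
 * first completion therefore reveals whether the device consumed the
 * write-combined BlueFlame data (see mlx5_wc_poll_cq()), and the
 * verdict is recorded in mdev->wc_state.
 */
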
#include <linux/io.h>
#include <linux/mlx5/transobj.h>
#include "lib/clock.h"
#include "mlx5_core.h"
#include "wq.h"

#define TEST_WC_NUM_WQES 255
#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)

struct mlx5_wc_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq wq;

	/* data path - accessed per napi poll */
	struct mlx5_core_cq mcq;

	/* control */
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_ctrl wq_ctrl;
};

struct mlx5_wc_sq {
	/* data path */
	u16 cc;
	u16 pc;

	/* read only */
	struct mlx5_wq_cyc wq;
	u32 sqn;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;

	struct mlx5_wc_cq cq;
	struct mlx5_sq_bfreg bfreg;
};

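/* Allocate the CQ ring and doorbell, and initialize every CQE to the
 * invalid opcode with the ownership bit set (op_own = 0xf1) so that
 * stale entries are never mistaken for completions written by the
 * device.
 */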
static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc,
			       struct mlx5_wc_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	struct mlx5_wq_param param = {};
	int err;
	u32 i;

	err = mlx5_cqwq_create(mdev, &param, cqc, &cq->wq, &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

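/* Issue the CREATE_CQ command: copy in the prepared CQ context, map the
 * ring pages (PAS) and doorbell record, and bind the CQ to completion
 * EQ vector 0.
 */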
static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int err, inlen, eqn;
	void *in, *cqc;

	err = mlx5_comp_eqn_get(mdev, 0, &eqn);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
	MLX5_SET(cqc,   cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}

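/* Build the CQ context (size, UAR page, 128-byte padded CQEs when the
 * device and cache line size allow it), then create both the software
 * ring and the hardware CQ object.
 */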
static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
{
	void *cqc;
	int err;

	cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
	if (!cqc)
		return -ENOMEM;

	MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);

	err = mlx5_wc_create_cqwq(mdev, cqc, cq);
	if (err) {
		mlx5_core_err(mdev, "Failed to create wc cq wq, err=%d\n", err);
		goto err_create_cqwq;
	}

	err = create_wc_cq(cq, cqc);
	if (err) {
		mlx5_core_err(mdev, "Failed to create wc cq, err=%d\n", err);
		goto err_create_cq;
	}

	kvfree(cqc);
	return 0;

err_create_cq:
	mlx5_wq_destroy(&cq->wq_ctrl);
err_create_cqwq:
	kvfree(cqc);
	return err;
}

static void mlx5_wc_destroy_cq(struct mlx5_wc_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
	mlx5_wq_destroy(&cq->wq_ctrl);
}

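/* Issue the CREATE_SQ command with the prepared SQ context (bound to
 * the test CQ and the BlueFlame UAR page), then move the SQ from RST
 * to RDY so it will execute posted WQEs.
 */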
static int create_wc_sq(struct mlx5_core_dev *mdev, void *sqc_data,
			struct mlx5_wc_sq *sq)
{
	void *in, *sqc, *wq;
	int inlen, err;
	u8 ts_format;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
	MLX5_SET(sqc,  sqc, cqn, sq->cq.mcq.cqn);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

	ts_format = mlx5_is_real_time_sq(mdev) ?
			MLX5_TIMESTAMP_FORMAT_REAL_TIME :
			MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
	MLX5_SET(sqc, sqc, ts_format, ts_format);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      sq->bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
	if (err) {
		mlx5_core_err(mdev, "Failed to create wc sq, err=%d\n", err);
		goto err_create_sq;
	}

	memset(in, 0, MLX5_ST_SZ_BYTES(modify_sq_in));
	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	err = mlx5_core_modify_sq(mdev, sq->sqn, in);
	if (err) {
		mlx5_core_err(mdev, "Failed to set wc sq(sqn=0x%x) ready, err=%d\n",
			      sq->sqn, err);
		goto err_modify_sq;
	}

	kvfree(in);
	return 0;

err_modify_sq:
	mlx5_core_destroy_sq(mdev, sq->sqn);
err_create_sq:
	kvfree(in);
	return err;
}

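/* Allocate the cyclic send work queue; stride, size and PD are filled
 * in here, while the rest of the SQ context is completed by
 * create_wc_sq().
 */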
static int mlx5_wc_create_sq(struct mlx5_core_dev *mdev, struct mlx5_wc_sq *sq)
{
	struct mlx5_wq_param param = {};
	void *sqc_data, *wq;
	int err;

	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
	if (!sqc_data)
		return -ENOMEM;

	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(wq, wq, log_wq_sz, TEST_WC_SQ_LOG_WQ_SZ);

	err = mlx5_wq_cyc_create(mdev, &param, wq, &sq->wq, &sq->wq_ctrl);
	if (err) {
		mlx5_core_err(mdev, "Failed to create wc sq wq, err=%d\n", err);
		goto err_create_wq_cyc;
	}

	err = create_wc_sq(mdev, sqc_data, sq);
	if (err)
		goto err_create_sq;

	mlx5_core_dbg(mdev, "wc sq->sqn = 0x%x created\n", sq->sqn);

	kvfree(sqc_data);
	return 0;

err_create_sq:
	mlx5_wq_destroy(&sq->wq_ctrl);
err_create_wq_cyc:
	kvfree(sqc_data);
	return err;
}

static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
{
	mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

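/* Post a single NOP WQE.  The copy written to host memory requests a
 * completion only when @signaled is set (the last WQE of the test),
 * whereas the 64-byte image pushed through the BlueFlame register
 * always requests one.  If the device consumes the write-combined
 * BlueFlame data, completions are thus generated for earlier WQEs as
 * well, which is what mlx5_wc_poll_cq() looks for.  The offset is
 * toggled between the two halves of the BlueFlame buffer after every
 * post, so consecutive posts target alternating halves.
 */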
static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled)
{
	int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
	struct mlx5_wqe_ctrl_seg *ctrl;
	__be32 mmio_wqe[16] = {};
	u16 pi;

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	memset(ctrl, 0, sizeof(*ctrl));
	ctrl->opmod_idx_opcode =
		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_NOP);
	ctrl->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
			    DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), MLX5_SEND_WQE_DS));
	if (signaled)
		ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;

	memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
	((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
		MLX5_WQE_CTRL_CQ_UPDATE;

	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	sq->pc++;
	sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	__iowrite64_copy(sq->bfreg.map + sq->bfreg.offset, mmio_wqe,
			 sizeof(mmio_wqe) / 8);

	sq->bfreg.offset ^= buf_size;
}

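/* Poll for one completion and derive the WC verdict from it: a
 * completion whose wqe_counter points at the last WQE (the only one
 * signaled in host memory) means none of the BlueFlame copies
 * generated a completion, i.e. write combining did not work; any
 * earlier wqe_counter means the device consumed a write-combined WQE.
 */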
static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
{
	struct mlx5_wc_cq *cq = &sq->cq;
	struct mlx5_cqe64 *cqe;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return -ETIMEDOUT;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	mlx5_cqwq_pop(&cq->wq);

	if (get_cqe_opcode(cqe) == MLX5_CQE_REQ) {
		int wqe_counter = be16_to_cpu(cqe->wqe_counter);
		struct mlx5_core_dev *mdev = cq->mdev;

		if (wqe_counter == TEST_WC_NUM_WQES - 1)
			mdev->wc_state = MLX5_WC_STATE_UNSUPPORTED;
		else
			mdev->wc_state = MLX5_WC_STATE_SUPPORTED;

		mlx5_core_dbg(mdev, "wc wqe_counter = 0x%x\n", wqe_counter);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc++;

	return 0;
}

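/* Run the WC test once per device: create the bfreg/CQ/SQ resources,
 * post TEST_WC_NUM_WQES NOPs (only the last one signaled in host
 * memory) and poll for up to TEST_WC_POLLING_MAX_TIME_JIFFIES until a
 * verdict is recorded in mdev->wc_state.  All resources are torn down
 * before returning, whether or not a verdict was reached.
 */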
static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
{
	unsigned long expires;
	struct mlx5_wc_sq *sq;
	int i, err;

	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
		return;

	sq = kzalloc(sizeof(*sq), GFP_KERNEL);
	if (!sq)
		return;

	err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc bfreg for wc, err=%d\n", err);
		goto err_alloc_bfreg;
	}

	err = mlx5_wc_create_cq(mdev, &sq->cq);
	if (err)
		goto err_create_cq;

	err = mlx5_wc_create_sq(mdev, sq);
	if (err)
		goto err_create_sq;

	for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
		mlx5_wc_post_nop(sq, false);

	mlx5_wc_post_nop(sq, true);

	expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
	do {
		err = mlx5_wc_poll_cq(sq);
		if (err)
			usleep_range(2, 10);
	} while (mdev->wc_state == MLX5_WC_STATE_UNINITIALIZED &&
		 time_is_after_jiffies(expires));

	mlx5_wc_destroy_sq(sq);

err_create_sq:
	mlx5_wc_destroy_cq(&sq->cq);
err_create_cq:
	mlx5_free_bfreg(mdev, &sq->bfreg);
err_alloc_bfreg:
	kfree(sq);
}

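/* Report whether write combining works on this device, running the
 * test on first use and caching the verdict in mdev->wc_state under
 * wc_state_lock.  An SF first inherits the verdict of its parent
 * device and only runs the test itself if the parent's result is
 * still undetermined.
 */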
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
{
	struct mlx5_core_dev *parent = NULL;

	if (!MLX5_CAP_GEN(mdev, bf)) {
		mlx5_core_dbg(mdev, "BlueFlame not supported\n");
		goto out;
	}

	if (!MLX5_CAP_GEN(mdev, log_max_sq)) {
		mlx5_core_dbg(mdev, "SQ not supported\n");
		goto out;
	}

	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
		/* No need to take the lock: the WC test runs only once
		 * per device, and at this point it has already completed.
		 */
		goto out;

	mutex_lock(&mdev->wc_state_lock);

	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
		goto unlock;

#ifdef CONFIG_MLX5_SF
	if (mlx5_core_is_sf(mdev))
		parent = mdev->priv.parent_mdev;
#endif

	if (parent) {
		mutex_lock(&parent->wc_state_lock);

		mlx5_core_test_wc(parent);

		mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
			      parent->wc_state);
		mdev->wc_state = parent->wc_state;

		mutex_unlock(&parent->wc_state_lock);
	}

	mlx5_core_test_wc(mdev);

unlock:
	mutex_unlock(&mdev->wc_state_lock);
out:
	mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);

	return mdev->wc_state == MLX5_WC_STATE_SUPPORTED;
}
EXPORT_SYMBOL(mlx5_wc_support_get);
435