xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/wc.c (revision f617d24606553159a271f43e36d1c71a4c317e48)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <linux/io.h>
#include <linux/mlx5/transobj.h>
#include "lib/clock.h"
#include "mlx5_core.h"
#include "wq.h"

#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
#include <asm/neon.h>
#include <asm/simd.h>
#endif

#define TEST_WC_NUM_WQES 255
#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
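/* The write-combining test posts TEST_WC_NUM_WQES NOP WQEs through the
 * BlueFlame register; the SQ and CQ are sized to hold all of them
 * (order_base_2(255) == 8) and the CQ is polled for up to 100 msec.
 */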

struct mlx5_wc_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq wq;

	/* data path - accessed per napi poll */
	struct mlx5_core_cq mcq;

	/* control */
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_ctrl wq_ctrl;
};

struct mlx5_wc_sq {
	/* data path */
	u16 cc;
	u16 pc;

	/* read only */
	struct mlx5_wq_cyc wq;
	u32 sqn;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;

	struct mlx5_wc_cq cq;
	struct mlx5_sq_bfreg bfreg;
};

static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc,
			       struct mlx5_wc_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	struct mlx5_wq_param param = {};
	int err;
	u32 i;

	err = mlx5_cqwq_create(mdev, &param, cqc, &cq->wq, &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;

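	/* Initialize all CQEs with the invalid opcode and the HW ownership
	 * bit (op_own = 0xf1) so stale entries are never treated as valid
	 * completions before the HW writes them.
	 */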
	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int err, inlen, eqn;
	void *in, *cqc;

	err = mlx5_comp_eqn_get(mdev, 0, &eqn);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
	MLX5_SET(cqc,   cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.bfreg.up->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}

static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
{
	void *cqc;
	int err;

	cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
	if (!cqc)
		return -ENOMEM;

	MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
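	/* Use 128 byte padded CQEs when the device supports them and the
	 * cache line is at least 128 bytes, so a CQE never shares a cache
	 * line.
	 */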
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);

	err = mlx5_wc_create_cqwq(mdev, cqc, cq);
	if (err) {
		mlx5_core_err(mdev, "Failed to create wc cq wq, err=%d\n", err);
		goto err_create_cqwq;
	}

	err = create_wc_cq(cq, cqc);
	if (err) {
		mlx5_core_err(mdev, "Failed to create wc cq, err=%d\n", err);
		goto err_create_cq;
	}

	kvfree(cqc);
	return 0;

err_create_cq:
	mlx5_wq_destroy(&cq->wq_ctrl);
err_create_cqwq:
	kvfree(cqc);
	return err;
}

static void mlx5_wc_destroy_cq(struct mlx5_wc_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int create_wc_sq(struct mlx5_core_dev *mdev, void *sqc_data,
			struct mlx5_wc_sq *sq)
{
	void *in, *sqc, *wq;
	int inlen, err;
	u8 ts_format;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
	MLX5_SET(sqc,  sqc, cqn, sq->cq.mcq.cqn);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

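	/* Use the timestamp format the device is configured for (real-time
	 * clock or free-running counter).
	 */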
	ts_format = mlx5_is_real_time_sq(mdev) ?
			MLX5_TIMESTAMP_FORMAT_REAL_TIME :
			MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
	MLX5_SET(sqc, sqc, ts_format, ts_format);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      sq->bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
	if (err) {
		mlx5_core_err(mdev, "Failed to create wc sq, err=%d\n", err);
		goto err_create_sq;
	}

	memset(in, 0,  MLX5_ST_SZ_BYTES(modify_sq_in));
	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	err = mlx5_core_modify_sq(mdev, sq->sqn, in);
	if (err) {
		mlx5_core_err(mdev, "Failed to set wc sq(sqn=0x%x) ready, err=%d\n",
			      sq->sqn, err);
		goto err_modify_sq;
	}

	kvfree(in);
	return 0;

err_modify_sq:
	mlx5_core_destroy_sq(mdev, sq->sqn);
err_create_sq:
	kvfree(in);
	return err;
}

static int mlx5_wc_create_sq(struct mlx5_core_dev *mdev, struct mlx5_wc_sq *sq)
{
	struct mlx5_wq_param param = {};
	void *sqc_data, *wq;
	int err;

	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
	if (!sqc_data)
		return -ENOMEM;

	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(wq, wq, log_wq_sz, TEST_WC_SQ_LOG_WQ_SZ);

	err = mlx5_wq_cyc_create(mdev, &param, wq, &sq->wq, &sq->wq_ctrl);
	if (err) {
		mlx5_core_err(mdev, "Failed to create wc sq wq, err=%d\n", err);
		goto err_create_wq_cyc;
	}

	err = create_wc_sq(mdev, sqc_data, sq);
	if (err)
		goto err_create_sq;

	mlx5_core_dbg(mdev, "wc sq->sqn = 0x%x created\n", sq->sqn);

	kvfree(sqc_data);
	return 0;

err_create_sq:
	mlx5_wq_destroy(&sq->wq_ctrl);
err_create_wq_cyc:
	kvfree(sqc_data);
	return err;
}

static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
{
	mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

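/* Copy a 64 byte WQE to the BlueFlame register. On arm64 with kernel-mode
 * NEON, issue it as a single ld1/st1 of four 128-bit vectors to improve the
 * chance that it reaches the device as one write-combined burst; otherwise
 * fall back to __iowrite64_copy() (eight 64-bit MMIO writes).
 */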
static void mlx5_iowrite64_copy(struct mlx5_wc_sq *sq, __be32 mmio_wqe[16],
				size_t mmio_wqe_size, unsigned int offset)
{
#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
	if (cpu_has_neon()) {
		scoped_ksimd() {
			asm volatile(
				".arch_extension simd\n\t"
				"ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%0]\n\t"
				"st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%1]"
				:
				: "r"(mmio_wqe), "r"(sq->bfreg.map + offset)
				: "memory", "v0", "v1", "v2", "v3");
		}
		return;
	}
#endif
	__iowrite64_copy(sq->bfreg.map + offset, mmio_wqe,
			 mmio_wqe_size / 8);
}

static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, unsigned int *offset,
			     bool signaled)
{
	int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
	struct mlx5_wqe_ctrl_seg *ctrl;
	__be32 mmio_wqe[16] = {};
	u16 pi;

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	memset(ctrl, 0, sizeof(*ctrl));
	ctrl->opmod_idx_opcode =
		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_NOP);
	ctrl->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
			    DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), MLX5_SEND_WQE_DS));
	if (signaled)
		ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;

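	/* The BlueFlame copy always requests a completion, while the WQE in
	 * host memory does so only when signaled. A completion for an
	 * unsignaled WQE can therefore only result from a BlueFlame write
	 * that reached the device intact.
	 */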
	memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
	((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
		MLX5_WQE_CTRL_CQ_UPDATE;

	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	sq->pc++;
	sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_iowrite64_copy(sq, mmio_wqe, sizeof(mmio_wqe), *offset);

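	/* Alternate between the two halves of the BlueFlame register
	 * (buf_size is half its total size) so consecutive WQEs use
	 * different write-combining buffers.
	 */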
	*offset ^= buf_size;
}

static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
{
	struct mlx5_wc_cq *cq = &sq->cq;
	struct mlx5_cqe64 *cqe;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return -ETIMEDOUT;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	mlx5_cqwq_pop(&cq->wq);

	if (get_cqe_opcode(cqe) == MLX5_CQE_REQ) {
		int wqe_counter = be16_to_cpu(cqe->wqe_counter);
		struct mlx5_core_dev *mdev = cq->mdev;

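		/* Only the last posted WQE requested a completion in host
		 * memory. If the first completion seen belongs to that WQE,
		 * none of the BlueFlame copies arrived as a full 64 byte
		 * write, so write combining does not work; any earlier
		 * completion proves that it does.
		 */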
		if (wqe_counter == TEST_WC_NUM_WQES - 1)
			mdev->wc_state = MLX5_WC_STATE_UNSUPPORTED;
		else
			mdev->wc_state = MLX5_WC_STATE_SUPPORTED;

		mlx5_core_dbg(mdev, "wc wqe_counter = 0x%x\n", wqe_counter);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc++;

	return 0;
}

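/* Run the write-combining test once per device: allocate a BlueFlame
 * register, create the test CQ and SQ, post TEST_WC_NUM_WQES NOPs through
 * the BlueFlame register (only the last one signaled in host memory) and
 * poll the CQ for up to TEST_WC_POLLING_MAX_TIME_JIFFIES to decide
 * mdev->wc_state.
 */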
static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
{
	unsigned int offset = 0;
	unsigned long expires;
	struct mlx5_wc_sq *sq;
	int i, err;

	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
		return;

	sq = kzalloc(sizeof(*sq), GFP_KERNEL);
	if (!sq)
		return;

	err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc bfreg for wc, err=%d\n", err);
		goto err_alloc_bfreg;
	}

	err = mlx5_wc_create_cq(mdev, &sq->cq);
	if (err)
		goto err_create_cq;

	err = mlx5_wc_create_sq(mdev, sq);
	if (err)
		goto err_create_sq;

	for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
		mlx5_wc_post_nop(sq, &offset, false);

	mlx5_wc_post_nop(sq, &offset, true);

	expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
	do {
		err = mlx5_wc_poll_cq(sq);
		if (err)
			usleep_range(2, 10);
	} while (mdev->wc_state == MLX5_WC_STATE_UNINITIALIZED &&
		 time_is_after_jiffies(expires));

	mlx5_wc_destroy_sq(sq);

err_create_sq:
	mlx5_wc_destroy_cq(&sq->cq);
err_create_cq:
	mlx5_free_bfreg(mdev, &sq->bfreg);
err_alloc_bfreg:
	kfree(sq);

	if (mdev->wc_state == MLX5_WC_STATE_UNSUPPORTED)
		mlx5_core_warn(mdev, "Write combining is not supported\n");
}

bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
{
	struct mutex *wc_state_lock = &mdev->wc_state_lock;
	struct mlx5_core_dev *parent = NULL;

	if (!MLX5_CAP_GEN(mdev, bf)) {
		mlx5_core_dbg(mdev, "BlueFlame not supported\n");
		goto out;
	}

	if (!MLX5_CAP_GEN(mdev, log_max_sq)) {
		mlx5_core_dbg(mdev, "SQ not supported\n");
		goto out;
	}

	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
		/* The WC test is run only once per device and has already
		 * completed, so no locking is needed here.
		 */
		goto out;

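	/* A sub function (SF) inherits the outcome of its parent device's
	 * WC test, so serialize on the parent's lock and run the test there.
	 */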
#ifdef CONFIG_MLX5_SF
	if (mlx5_core_is_sf(mdev)) {
		parent = mdev->priv.parent_mdev;
		wc_state_lock = &parent->wc_state_lock;
	}
#endif

	mutex_lock(wc_state_lock);

	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
		goto unlock;

	if (parent) {
		mlx5_core_test_wc(parent);

		mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
			      parent->wc_state);
		mdev->wc_state = parent->wc_state;

	} else {
		mlx5_core_test_wc(mdev);
	}

unlock:
	mutex_unlock(wc_state_lock);
out:
	mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);

	return mdev->wc_state == MLX5_WC_STATE_SUPPORTED;
}
EXPORT_SYMBOL(mlx5_wc_support_get);
465