xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/wc.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 
4 #include <linux/io.h>
5 #include <linux/iopoll.h>
6 #include <linux/mlx5/transobj.h>
7 #include "lib/clock.h"
8 #include "mlx5_core.h"
9 #include "wq.h"
10 
11 #if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
12 #include <asm/neon.h>
13 #include <asm/simd.h>
14 #endif
15 
/* Number of NOP WQEs the self-test posts; only the last one is signaled
 * through the in-memory WQE (see mlx5_wc_post_nop()).
 */
#define TEST_WC_NUM_WQES 255
/* CQ sized to hold one completion per posted WQE */
#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
/* Total budget for polling the test CQ (100 ms) */
#define TEST_WC_POLLING_MAX_TIME_USEC (100 * USEC_PER_MSEC)
20 
/* Minimal completion queue used only by the write-combining self-test. */
struct mlx5_wc_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq wq;

	/* data path - accessed per napi poll */
	struct mlx5_core_cq mcq;

	/* control */
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_ctrl wq_ctrl;
};
32 
/* Minimal send queue used only by the write-combining self-test. */
struct mlx5_wc_sq {
	/* data path */
	u16 cc;	/* consumer counter, advanced in mlx5_wc_poll_cq() */
	u16 pc;	/* producer counter, advanced in mlx5_wc_post_nop() */

	/* read only */
	struct mlx5_wq_cyc wq;
	u32 sqn;	/* SQ number assigned by firmware at creation */

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;

	struct mlx5_wc_cq cq;	/* companion CQ for this SQ's completions */
	struct mlx5_sq_bfreg bfreg;	/* BlueFlame register the WQEs are written to */
};
48 
mlx5_wc_create_cqwq(struct mlx5_core_dev * mdev,void * cqc,struct mlx5_wc_cq * cq)49 static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc,
50 			       struct mlx5_wc_cq *cq)
51 {
52 	struct mlx5_core_cq *mcq = &cq->mcq;
53 	struct mlx5_wq_param param = {};
54 	int err;
55 	u32 i;
56 
57 	err = mlx5_cqwq_create(mdev, &param, cqc, &cq->wq, &cq->wq_ctrl);
58 	if (err)
59 		return err;
60 
61 	mcq->cqe_sz     = 64;
62 	mcq->set_ci_db  = cq->wq_ctrl.db.db;
63 	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
64 
65 	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
66 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
67 
68 		cqe->op_own = 0xf1;
69 	}
70 
71 	cq->mdev = mdev;
72 
73 	return 0;
74 }
75 
/* Issue the CREATE_CQ firmware command for the self-test CQ.
 *
 * @cq:       CQ whose host-side resources were set up by
 *            mlx5_wc_create_cqwq(); cq->mcq is populated on success.
 * @cqc_data: caller-seeded CQ context (log size, uar page, cqe_sz);
 *            copied into the command and then extended with the
 *            command-specific fields below.
 *
 * Returns 0 on success or a negative errno.
 */
static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int err, inlen, eqn;
	void *in, *cqc;

	/* Attach completions to completion EQ 0 */
	err = mlx5_comp_eqn_get(mdev, 0, &eqn);
	if (err)
		return err;

	/* Command length: base struct plus one PA per buffer page */
	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	/* Start from the caller's context, then fill in command-specific
	 * fields (must come after the memcpy or they would be overwritten).
	 */
	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
	MLX5_SET(cqc,   cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.bfreg.up->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}
114 
mlx5_wc_create_cq(struct mlx5_core_dev * mdev,struct mlx5_wc_cq * cq)115 static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
116 {
117 	void *cqc;
118 	int err;
119 
120 	cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
121 	if (!cqc)
122 		return -ENOMEM;
123 
124 	MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
125 	MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
126 	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
127 		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
128 
129 	err = mlx5_wc_create_cqwq(mdev, cqc, cq);
130 	if (err) {
131 		mlx5_core_err(mdev, "Failed to create wc cq wq, err=%d\n", err);
132 		goto err_create_cqwq;
133 	}
134 
135 	err = create_wc_cq(cq, cqc);
136 	if (err) {
137 		mlx5_core_err(mdev, "Failed to create wc cq, err=%d\n", err);
138 		goto err_create_cq;
139 	}
140 
141 	kvfree(cqc);
142 	return 0;
143 
144 err_create_cq:
145 	mlx5_wq_destroy(&cq->wq_ctrl);
146 err_create_cqwq:
147 	kvfree(cqc);
148 	return err;
149 }
150 
mlx5_wc_destroy_cq(struct mlx5_wc_cq * cq)151 static void mlx5_wc_destroy_cq(struct mlx5_wc_cq *cq)
152 {
153 	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
154 	mlx5_wq_destroy(&cq->wq_ctrl);
155 }
156 
/* Create the self-test SQ firmware object and move it to the READY state.
 *
 * @sqc_data: caller-seeded SQ context (WQ attributes); copied into the
 *            CREATE_SQ command before command-specific fields are set.
 *
 * On success sq->sqn holds the firmware-assigned SQ number.
 * Returns 0 or a negative errno.
 */
static int create_wc_sq(struct mlx5_core_dev *mdev, void *sqc_data,
			struct mlx5_wc_sq *sq)
{
	void *in, *sqc, *wq;
	int inlen, err;
	u8 ts_format;

	/* Command length: base struct plus one PA per buffer page */
	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	/* Seed from the caller's context, then fill command-specific fields
	 * (must come after the memcpy or they would be overwritten).
	 */
	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
	MLX5_SET(sqc,  sqc, cqn, sq->cq.mcq.cqn);

	/* SQs are created in RESET and moved to READY below */
	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);

	ts_format = mlx5_is_real_time_sq(mdev) ?
			MLX5_TIMESTAMP_FORMAT_REAL_TIME :
			MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
	MLX5_SET(sqc, sqc, ts_format, ts_format);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      sq->bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
	if (err) {
		mlx5_core_err(mdev, "Failed to create wc sq, err=%d\n", err);
		goto err_create_sq;
	}

	/* Reuse the (larger) create buffer for the RST->RDY modify command */
	memset(in, 0,  MLX5_ST_SZ_BYTES(modify_sq_in));
	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	err = mlx5_core_modify_sq(mdev, sq->sqn, in);
	if (err) {
		mlx5_core_err(mdev, "Failed to set wc sq(sqn=0x%x) ready, err=%d\n",
			      sq->sqn, err);
		goto err_modify_sq;
	}

	kvfree(in);
	return 0;

err_modify_sq:
	mlx5_core_destroy_sq(mdev, sq->sqn);
err_create_sq:
	kvfree(in);
	return err;
}
220 
mlx5_wc_create_sq(struct mlx5_core_dev * mdev,struct mlx5_wc_sq * sq)221 static int mlx5_wc_create_sq(struct mlx5_core_dev *mdev, struct mlx5_wc_sq *sq)
222 {
223 	struct mlx5_wq_param param = {};
224 	void *sqc_data, *wq;
225 	int err;
226 
227 	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
228 	if (!sqc_data)
229 		return -ENOMEM;
230 
231 	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
232 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
233 	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
234 	MLX5_SET(wq, wq, log_wq_sz, TEST_WC_SQ_LOG_WQ_SZ);
235 
236 	err = mlx5_wq_cyc_create(mdev, &param, wq, &sq->wq, &sq->wq_ctrl);
237 	if (err) {
238 		mlx5_core_err(mdev, "Failed to create wc sq wq, err=%d\n", err);
239 		goto err_create_wq_cyc;
240 	}
241 
242 	err = create_wc_sq(mdev, sqc_data, sq);
243 	if (err)
244 		goto err_create_sq;
245 
246 	mlx5_core_dbg(mdev, "wc sq->sqn = 0x%x created\n", sq->sqn);
247 
248 	kvfree(sqc_data);
249 	return 0;
250 
251 err_create_sq:
252 	mlx5_wq_destroy(&sq->wq_ctrl);
253 err_create_wq_cyc:
254 	kvfree(sqc_data);
255 	return err;
256 }
257 
mlx5_wc_destroy_sq(struct mlx5_wc_sq * sq)258 static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
259 {
260 	mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
261 	mlx5_wq_destroy(&sq->wq_ctrl);
262 }
263 
/* Copy the 64-byte shadow WQE into the BlueFlame register at @offset.
 *
 * On arm64 with kernel-mode NEON available, use a single 64B vector
 * load/store pair (4 x 16B registers) — presumably to maximize the chance
 * the write leaves the core as one combined burst; confirm against the
 * platform's write-combining behavior. Otherwise fall back to the generic
 * 64-bit MMIO copy helper.
 */
static void mlx5_iowrite64_copy(struct mlx5_wc_sq *sq, __be32 mmio_wqe[16],
				size_t mmio_wqe_size, unsigned int offset)
{
#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
	if (cpu_has_neon()) {
		scoped_ksimd() {
			asm volatile(
				".arch_extension simd\n\t"
				"ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%0]\n\t"
				"st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%1]"
				:
				: "r"(mmio_wqe), "r"(sq->bfreg.map + offset)
				: "memory", "v0", "v1", "v2", "v3");
		}
		return;
	}
#endif
	/* Generic path: 8 x 64-bit writes */
	__iowrite64_copy(sq->bfreg.map + offset, mmio_wqe,
			 mmio_wqe_size / 8);
}
284 
/* Post one NOP WQE and ring the doorbell via a BlueFlame (write-combining)
 * MMIO copy.
 *
 * The in-memory WQE requests a completion only when @signaled is set, while
 * the MMIO shadow copy always sets CQ_UPDATE. A completion for an
 * unsignaled index therefore indicates the device consumed the BlueFlame
 * copy — this asymmetry is what lets mlx5_wc_poll_cq() detect whether WC
 * writes actually reached the device.
 *
 * @offset: in/out - byte offset into the BlueFlame register; toggled
 *          between the two halves of the register after each post.
 */
static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, unsigned int *offset,
			     bool signaled)
{
	/* Half the BlueFlame register size - posts alternate halves */
	int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
	struct mlx5_wqe_ctrl_seg *ctrl;
	__be32 mmio_wqe[16] = {};
	u16 pi;

	/* Build the NOP control segment in the in-memory WQ slot */
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	memset(ctrl, 0, sizeof(*ctrl));
	ctrl->opmod_idx_opcode =
		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_NOP);
	ctrl->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
			    DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), MLX5_SEND_WQE_DS));
	if (signaled)
		ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;

	/* The MMIO shadow always requests a completion (see header comment) */
	memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
	((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
		MLX5_WQE_CTRL_CQ_UPDATE;

	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	sq->pc++;
	sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_iowrite64_copy(sq, mmio_wqe, sizeof(mmio_wqe), *offset);

	*offset ^= buf_size;
}
323 
/* Poll the self-test CQ for one completion and classify the result.
 *
 * Returns -ETIMEDOUT when no CQE is available yet (the caller retries),
 * 0 once a CQE was consumed.
 *
 * Detection logic: only the last of TEST_WC_NUM_WQES WQEs is signaled in
 * memory, but every BlueFlame MMIO copy requests a completion (see
 * mlx5_wc_post_nop()). So if the first completion reports the final index
 * (TEST_WC_NUM_WQES - 1), none of the WC writes reached the device ->
 * unsupported; any earlier index means a WC write was consumed ->
 * supported.
 */
static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
{
	struct mlx5_wc_cq *cq = &sq->cq;
	struct mlx5_cqe64 *cqe;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return -ETIMEDOUT;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	mlx5_cqwq_pop(&cq->wq);

	if (get_cqe_opcode(cqe) == MLX5_CQE_REQ) {
		int wqe_counter = be16_to_cpu(cqe->wqe_counter);
		struct mlx5_core_dev *mdev = cq->mdev;

		if (wqe_counter == TEST_WC_NUM_WQES - 1)
			mdev->wc_state = MLX5_WC_STATE_UNSUPPORTED;
		else
			mdev->wc_state = MLX5_WC_STATE_SUPPORTED;

		mlx5_core_dbg(mdev, "wc wqe_counter = 0x%x\n", wqe_counter);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc++;

	return 0;
}
359 
/* Run the write-combining self-test once per device.
 *
 * Builds a throwaway BlueFlame register, CQ and SQ, posts
 * TEST_WC_NUM_WQES NOPs (only the last one signaled in memory), then polls
 * until mlx5_wc_poll_cq() sets mdev->wc_state or the 100 ms budget
 * expires. On timeout wc_state stays UNINITIALIZED.
 */
static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
{
	unsigned int offset = 0;
	struct mlx5_wc_sq *sq;
	int i, err;

	/* Already decided (supported or not) - nothing to do */
	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
		return;

	sq = kzalloc_obj(*sq);
	if (!sq)
		return;

	err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc bfreg for wc, err=%d\n", err);
		goto err_alloc_bfreg;
	}

	err = mlx5_wc_create_cq(mdev, &sq->cq);
	if (err)
		goto err_create_cq;

	err = mlx5_wc_create_sq(mdev, sq);
	if (err)
		goto err_create_sq;

	/* All but the last WQE are unsignaled in memory */
	for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
		mlx5_wc_post_nop(sq, &offset, false);

	mlx5_wc_post_nop(sq, &offset, true);

	/* Retry every 10us until a CQE classified the result or we time out */
	poll_timeout_us(mlx5_wc_poll_cq(sq),
			mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED, 10,
			TEST_WC_POLLING_MAX_TIME_USEC, false);

	mlx5_wc_destroy_sq(sq);

	/* Success path intentionally falls through the labels below to
	 * release the remaining resources.
	 */
err_create_sq:
	mlx5_wc_destroy_cq(&sq->cq);
err_create_cq:
	mlx5_free_bfreg(mdev, &sq->bfreg);
err_alloc_bfreg:
	kfree(sq);

	if (mdev->wc_state == MLX5_WC_STATE_UNSUPPORTED)
		mlx5_core_warn(mdev, "Write combining is not supported\n");
}
408 
/* mlx5_wc_support_get() - report whether write combining works on @mdev.
 *
 * Lazily runs the WC self-test the first time it is called for a device,
 * caching the outcome in mdev->wc_state. For SF devices the test is run
 * (and serialized) on the parent device so it executes only once per
 * physical device, and the parent's result is copied to the SF.
 *
 * Returns true only when the test concluded MLX5_WC_STATE_SUPPORTED.
 */
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
{
	struct mutex *wc_state_lock = &mdev->wc_state_lock;
	struct mlx5_core_dev *parent = NULL;

	if (!MLX5_CAP_GEN(mdev, bf)) {
		mlx5_core_dbg(mdev, "BlueFlame not supported\n");
		goto out;
	}

	if (!MLX5_CAP_GEN(mdev, log_max_sq)) {
		mlx5_core_dbg(mdev, "SQ not supported\n");
		goto out;
	}

	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
		/* No need to lock anything as we perform WC test only
		 * once for whole device and was already done.
		 */
		goto out;

#ifdef CONFIG_MLX5_SF
	/* SFs share the parent's WC result; serialize on the parent's lock */
	if (mlx5_core_is_sf(mdev)) {
		parent = mdev->priv.parent_mdev;
		wc_state_lock = &parent->wc_state_lock;
	}
#endif

	mutex_lock(wc_state_lock);

	/* Re-check under the lock: another caller may have run the test */
	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
		goto unlock;

	if (parent) {
		mlx5_core_test_wc(parent);

		mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
			      parent->wc_state);
		mdev->wc_state = parent->wc_state;

	} else {
		mlx5_core_test_wc(mdev);
	}

unlock:
	mutex_unlock(wc_state_lock);
out:
	mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);

	return mdev->wc_state == MLX5_WC_STATE_SUPPORTED;
}
EXPORT_SYMBOL(mlx5_wc_support_get);
461