xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/wc.c (revision 18a7e218cfcdca6666e1f7356533e4c988780b57)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 
4 #include <linux/io.h>
5 #include <linux/mlx5/transobj.h>
6 #include "lib/clock.h"
7 #include "mlx5_core.h"
8 #include "wq.h"
9 
10 #if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
11 #include <asm/neon.h>
12 #endif
13 
14 #define TEST_WC_NUM_WQES 255
15 #define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
16 #define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
17 #define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
18 
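/*
 * Write-combining (WC) self-test, as implemented below: a throw-away SQ/CQ
 * pair is created and TEST_WC_NUM_WQES NOP WQEs are posted, each also copied
 * to the BlueFlame register as a 64-byte MMIO write that requests a
 * completion, while only the last WQE is signaled in memory.  If the first
 * completion reports a wqe_counter earlier than the last WQE, at least one
 * BlueFlame (write-combined) copy presumably reached the device intact and
 * WC is marked supported; otherwise it is marked unsupported.  The hardware
 * rationale is inferred from the code below, not from documentation.
 */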
19 struct mlx5_wc_cq {
20 	/* data path - accessed per cqe */
21 	struct mlx5_cqwq wq;
22 
23 	/* data path - accessed per napi poll */
24 	struct mlx5_core_cq mcq;
25 
26 	/* control */
27 	struct mlx5_core_dev *mdev;
28 	struct mlx5_wq_ctrl wq_ctrl;
29 };
30 
31 struct mlx5_wc_sq {
32 	/* data path */
33 	u16 cc;
34 	u16 pc;
35 
36 	/* read only */
37 	struct mlx5_wq_cyc wq;
38 	u32 sqn;
39 
40 	/* control path */
41 	struct mlx5_wq_ctrl wq_ctrl;
42 
43 	struct mlx5_wc_cq cq;
44 	struct mlx5_sq_bfreg bfreg;
45 };
46 
47 static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc,
48 			       struct mlx5_wc_cq *cq)
49 {
50 	struct mlx5_core_cq *mcq = &cq->mcq;
51 	struct mlx5_wq_param param = {};
52 	int err;
53 	u32 i;
54 
55 	err = mlx5_cqwq_create(mdev, &param, cqc, &cq->wq, &cq->wq_ctrl);
56 	if (err)
57 		return err;
58 
59 	mcq->cqe_sz     = 64;
60 	mcq->set_ci_db  = cq->wq_ctrl.db.db;
61 	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
62 
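	/* Initialize every CQE with an invalid opcode and the ownership bit
	 * set (0xf1) so that stale ring entries are not treated as valid
	 * completions before the hardware writes them.
	 */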
63 	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
64 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
65 
66 		cqe->op_own = 0xf1;
67 	}
68 
69 	cq->mdev = mdev;
70 
71 	return 0;
72 }
73 
74 static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
75 {
76 	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
77 	struct mlx5_core_dev *mdev = cq->mdev;
78 	struct mlx5_core_cq *mcq = &cq->mcq;
79 	int err, inlen, eqn;
80 	void *in, *cqc;
81 
82 	err = mlx5_comp_eqn_get(mdev, 0, &eqn);
83 	if (err)
84 		return err;
85 
86 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
87 		sizeof(u64) * cq->wq_ctrl.buf.npages;
88 	in = kvzalloc(inlen, GFP_KERNEL);
89 	if (!in)
90 		return -ENOMEM;
91 
92 	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
93 
94 	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
95 
96 	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
97 				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
98 
99 	MLX5_SET(cqc,   cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
100 	MLX5_SET(cqc,   cqc, c_eqn_or_apu_element, eqn);
101 	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.bfreg.up->index);
102 	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
103 					    MLX5_ADAPTER_PAGE_SHIFT);
104 	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
105 
106 	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
107 
108 	kvfree(in);
109 
110 	return err;
111 }
112 
113 static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
114 {
115 	void *cqc;
116 	int err;
117 
118 	cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
119 	if (!cqc)
120 		return -ENOMEM;
121 
122 	MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
123 	MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
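	/* Use the padded 128-byte CQE stride when the device always writes
	 * 128-byte CQEs and the CPU cache line is at least that large,
	 * presumably so each CQE occupies its own cache line.
	 */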
124 	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
125 		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
126 
127 	err = mlx5_wc_create_cqwq(mdev, cqc, cq);
128 	if (err) {
129 		mlx5_core_err(mdev, "Failed to create wc cq wq, err=%d\n", err);
130 		goto err_create_cqwq;
131 	}
132 
133 	err = create_wc_cq(cq, cqc);
134 	if (err) {
135 		mlx5_core_err(mdev, "Failed to create wc cq, err=%d\n", err);
136 		goto err_create_cq;
137 	}
138 
139 	kvfree(cqc);
140 	return 0;
141 
142 err_create_cq:
143 	mlx5_wq_destroy(&cq->wq_ctrl);
144 err_create_cqwq:
145 	kvfree(cqc);
146 	return err;
147 }
148 
149 static void mlx5_wc_destroy_cq(struct mlx5_wc_cq *cq)
150 {
151 	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
152 	mlx5_wq_destroy(&cq->wq_ctrl);
153 }
154 
155 static int create_wc_sq(struct mlx5_core_dev *mdev, void *sqc_data,
156 			struct mlx5_wc_sq *sq)
157 {
158 	void *in, *sqc, *wq;
159 	int inlen, err;
160 	u8 ts_format;
161 
162 	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
163 		sizeof(u64) * sq->wq_ctrl.buf.npages;
164 	in = kvzalloc(inlen, GFP_KERNEL);
165 	if (!in)
166 		return -ENOMEM;
167 
168 	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
169 	wq = MLX5_ADDR_OF(sqc, sqc, wq);
170 
171 	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
172 	MLX5_SET(sqc,  sqc, cqn, sq->cq.mcq.cqn);
173 
174 	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
175 	MLX5_SET(sqc,  sqc, flush_in_error_en, 1);
176 
177 	ts_format = mlx5_is_real_time_sq(mdev) ?
178 			MLX5_TIMESTAMP_FORMAT_REAL_TIME :
179 			MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
180 	MLX5_SET(sqc, sqc, ts_format, ts_format);
181 
182 	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
183 	MLX5_SET(wq,   wq, uar_page,      sq->bfreg.index);
184 	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
185 					  MLX5_ADAPTER_PAGE_SHIFT);
186 	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
187 
188 	mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
189 				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
190 
191 	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
192 	if (err) {
193 		mlx5_core_err(mdev, "Failed to create wc sq, err=%d\n", err);
194 		goto err_create_sq;
195 	}
196 
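	/* The SQ is created in RST state; move it to RDY before posting. */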
197 	memset(in, 0,  MLX5_ST_SZ_BYTES(modify_sq_in));
198 	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
199 	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
200 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
201 
202 	err = mlx5_core_modify_sq(mdev, sq->sqn, in);
203 	if (err) {
204 		mlx5_core_err(mdev, "Failed to set wc sq(sqn=0x%x) ready, err=%d\n",
205 			      sq->sqn, err);
206 		goto err_modify_sq;
207 	}
208 
209 	kvfree(in);
210 	return 0;
211 
212 err_modify_sq:
213 	mlx5_core_destroy_sq(mdev, sq->sqn);
214 err_create_sq:
215 	kvfree(in);
216 	return err;
217 }
218 
219 static int mlx5_wc_create_sq(struct mlx5_core_dev *mdev, struct mlx5_wc_sq *sq)
220 {
221 	struct mlx5_wq_param param = {};
222 	void *sqc_data, *wq;
223 	int err;
224 
225 	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
226 	if (!sqc_data)
227 		return -ENOMEM;
228 
229 	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
230 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
231 	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
232 	MLX5_SET(wq, wq, log_wq_sz, TEST_WC_SQ_LOG_WQ_SZ);
233 
234 	err = mlx5_wq_cyc_create(mdev, &param, wq, &sq->wq, &sq->wq_ctrl);
235 	if (err) {
236 		mlx5_core_err(mdev, "Failed to create wc sq wq, err=%d\n", err);
237 		goto err_create_wq_cyc;
238 	}
239 
240 	err = create_wc_sq(mdev, sqc_data, sq);
241 	if (err)
242 		goto err_create_sq;
243 
244 	mlx5_core_dbg(mdev, "wc sq->sqn = 0x%x created\n", sq->sqn);
245 
246 	kvfree(sqc_data);
247 	return 0;
248 
249 err_create_sq:
250 	mlx5_wq_destroy(&sq->wq_ctrl);
251 err_create_wq_cyc:
252 	kvfree(sqc_data);
253 	return err;
254 }
255 
256 static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
257 {
258 	mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
259 	mlx5_wq_destroy(&sq->wq_ctrl);
260 }
261 
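/*
 * Copy a 64-byte WQE to the BlueFlame area.  On arm64 with NEON, a single
 * ld1/st1 of four 16-byte vectors is used, presumably so the store is more
 * likely to be issued as one write-combined burst; otherwise fall back to
 * __iowrite64_copy().
 */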
262 static void mlx5_iowrite64_copy(struct mlx5_wc_sq *sq, __be32 mmio_wqe[16],
263 				size_t mmio_wqe_size, unsigned int offset)
264 {
265 #if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
266 	if (cpu_has_neon()) {
267 		kernel_neon_begin();
268 		asm volatile
269 		(".arch_extension simd\n\t"
270 		"ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%0]\n\t"
271 		"st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%1]"
272 		:
273 		: "r"(mmio_wqe), "r"(sq->bfreg.map + offset)
274 		: "memory", "v0", "v1", "v2", "v3");
275 		kernel_neon_end();
276 		return;
277 	}
278 #endif
279 	__iowrite64_copy(sq->bfreg.map + offset, mmio_wqe,
280 			 mmio_wqe_size / 8);
281 }
282 
283 static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, unsigned int *offset,
284 			     bool signaled)
285 {
286 	int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
287 	struct mlx5_wqe_ctrl_seg *ctrl;
288 	__be32 mmio_wqe[16] = {};
289 	u16 pi;
290 
291 	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
292 	ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
293 	memset(ctrl, 0, sizeof(*ctrl));
294 	ctrl->opmod_idx_opcode =
295 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_NOP);
296 	ctrl->qpn_ds =
297 		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
298 			    DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), MLX5_SEND_WQE_DS));
299 	if (signaled)
300 		ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
301 
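	/* The BlueFlame copy of the WQE always requests a completion, whereas
	 * the in-memory copy does so only when signaled (the final WQE).  A
	 * completion for any earlier WQE can therefore only result from a WC
	 * write that reached the device; this is the inferred basis of the
	 * check in mlx5_wc_poll_cq().
	 */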
302 	memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
303 	((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
304 		MLX5_WQE_CTRL_CQ_UPDATE;
305 
306 	/* ensure wqe is visible to device before updating doorbell record */
307 	dma_wmb();
308 
309 	sq->pc++;
310 	sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc);
311 
312 	/* ensure doorbell record is visible to device before ringing the
313 	 * doorbell
314 	 */
315 	wmb();
316 
317 	mlx5_iowrite64_copy(sq, mmio_wqe, sizeof(mmio_wqe), *offset);
318 
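	/* Alternate between the two halves of the BlueFlame register for
	 * successive posts.
	 */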
319 	*offset ^= buf_size;
320 }
321 
322 static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
323 {
324 	struct mlx5_wc_cq *cq = &sq->cq;
325 	struct mlx5_cqe64 *cqe;
326 
327 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
328 	if (!cqe)
329 		return -ETIMEDOUT;
330 
331 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
332 	 * otherwise a cq overrun may occur
333 	 */
334 	mlx5_cqwq_pop(&cq->wq);
335 
336 	if (get_cqe_opcode(cqe) == MLX5_CQE_REQ) {
337 		int wqe_counter = be16_to_cpu(cqe->wqe_counter);
338 		struct mlx5_core_dev *mdev = cq->mdev;
339 
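		/* Only the last WQE is signaled in memory, so a completion
		 * carrying an earlier wqe_counter implies the device consumed
		 * a BlueFlame (WC) copy of the WQE.
		 */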
340 		if (wqe_counter == TEST_WC_NUM_WQES - 1)
341 			mdev->wc_state = MLX5_WC_STATE_UNSUPPORTED;
342 		else
343 			mdev->wc_state = MLX5_WC_STATE_SUPPORTED;
344 
345 		mlx5_core_dbg(mdev, "wc wqe_counter = 0x%x\n", wqe_counter);
346 	}
347 
348 	mlx5_cqwq_update_db_record(&cq->wq);
349 
350 	/* ensure cq space is freed before enabling more cqes */
351 	wmb();
352 
353 	sq->cc++;
354 
355 	return 0;
356 }
357 
358 static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
359 {
360 	unsigned int offset = 0;
361 	unsigned long expires;
362 	struct mlx5_wc_sq *sq;
363 	int i, err;
364 
365 	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
366 		return;
367 
368 	sq = kzalloc(sizeof(*sq), GFP_KERNEL);
369 	if (!sq)
370 		return;
371 
372 	err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false);
373 	if (err) {
374 		mlx5_core_err(mdev, "Failed to alloc bfreg for wc, err=%d\n", err);
375 		goto err_alloc_bfreg;
376 	}
377 
378 	err = mlx5_wc_create_cq(mdev, &sq->cq);
379 	if (err)
380 		goto err_create_cq;
381 
382 	err = mlx5_wc_create_sq(mdev, sq);
383 	if (err)
384 		goto err_create_sq;
385 
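	/* Post all NOPs unsignaled except the last one, which requests a
	 * completion in its in-memory copy as well.
	 */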
386 	for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
387 		mlx5_wc_post_nop(sq, &offset, false);
388 
389 	mlx5_wc_post_nop(sq, &offset, true);
390 
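	/* Poll for up to TEST_WC_POLLING_MAX_TIME_JIFFIES (100 ms) until a
	 * completion moves wc_state out of MLX5_WC_STATE_UNINITIALIZED.
	 */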
391 	expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
392 	do {
393 		err = mlx5_wc_poll_cq(sq);
394 		if (err)
395 			usleep_range(2, 10);
396 	} while (mdev->wc_state == MLX5_WC_STATE_UNINITIALIZED &&
397 		 time_is_after_jiffies(expires));
398 
399 	mlx5_wc_destroy_sq(sq);
400 
401 err_create_sq:
402 	mlx5_wc_destroy_cq(&sq->cq);
403 err_create_cq:
404 	mlx5_free_bfreg(mdev, &sq->bfreg);
405 err_alloc_bfreg:
406 	kfree(sq);
407 
408 	if (mdev->wc_state == MLX5_WC_STATE_UNSUPPORTED)
409 		mlx5_core_warn(mdev, "Write combining is not supported\n");
410 }
411 
412 bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
413 {
414 	struct mutex *wc_state_lock = &mdev->wc_state_lock;
415 	struct mlx5_core_dev *parent = NULL;
416 
417 	if (!MLX5_CAP_GEN(mdev, bf)) {
418 		mlx5_core_dbg(mdev, "BlueFlame not supported\n");
419 		goto out;
420 	}
421 
422 	if (!MLX5_CAP_GEN(mdev, log_max_sq)) {
423 		mlx5_core_dbg(mdev, "SQ not supported\n");
424 		goto out;
425 	}
426 
427 	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
428 		/* No need to lock anything as the WC test is performed only
429 		 * once for the whole device and has already been done.
430 		 */
431 		goto out;
432 
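	/* A sub-function shares its parent's PCI function, so the WC test is
	 * run on the parent device and the result is inherited below.
	 */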
433 #ifdef CONFIG_MLX5_SF
434 	if (mlx5_core_is_sf(mdev)) {
435 		parent = mdev->priv.parent_mdev;
436 		wc_state_lock = &parent->wc_state_lock;
437 	}
438 #endif
439 
440 	mutex_lock(wc_state_lock);
441 
442 	if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
443 		goto unlock;
444 
445 	if (parent) {
446 		mlx5_core_test_wc(parent);
447 
448 		mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
449 			      parent->wc_state);
450 		mdev->wc_state = parent->wc_state;
451 
452 	} else {
453 		mlx5_core_test_wc(mdev);
454 	}
455 
456 unlock:
457 	mutex_unlock(wc_state_lock);
458 out:
459 	mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
460 
461 	return mdev->wc_state == MLX5_WC_STATE_SUPPORTED;
462 }
463 EXPORT_SYMBOL(mlx5_wc_support_get);
464