// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <linux/mlx5/device.h>
#include <linux/mlx5/transobj.h>
#include "clock.h"
#include "aso.h"
#include "wq.h"

struct mlx5_aso_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq wq;

	/* data path - accessed per napi poll */
	struct mlx5_core_cq mcq;

	/* control */
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5_aso {
	/* data path */
	u16 cc; /* completion counter, advanced by mlx5_aso_poll_cq() */
	u16 pc; /* producer counter, advanced by mlx5_aso_post_wqe() */

	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5_aso_cq cq;

	/* read only */
	struct mlx5_wq_cyc wq;
	void __iomem *uar_map;
	u32 sqn;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;

} ____cacheline_aligned_in_smp;

static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
			     void *cqc_data, struct mlx5_aso_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	struct mlx5_wq_param param;
	int err;
	u32 i;

	param.buf_numa_node = numa_node;
	param.db_numa_node = numa_node;

	err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;

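	/* Mark every CQE as invalid and hardware-owned (op_own 0xf1) so
	 * that mlx5_cqwq_get_cqe() reports nothing until the device has
	 * written a real completion.
	 */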
	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in, *cqc;
	int inlen, eqn;
	int err;

	err = mlx5_comp_eqn_get(mdev, 0, &eqn);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}

static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
			      struct mlx5_aso_cq *cq)
{
	void *cqc_data;
	int err;

	cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
	if (!cqc_data)
		return -ENOMEM;

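	/* A two-entry CQ (log_cq_size = 1) suffices: the ASO queue is
	 * driven synchronously, one post/poll cycle at a time.
	 */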
	MLX5_SET(cqc, cqc_data, log_cq_size, 1);
	MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);

	err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
		goto err_out;
	}

	err = create_aso_cq(cq, cqc_data);
	if (err) {
		mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
		goto err_free_cq;
	}

	kvfree(cqc_data);
	return 0;

err_free_cq:
	mlx5_aso_free_cq(cq);
err_out:
	kvfree(cqc_data);
	return err;
}

static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
			     void *sqc_data, struct mlx5_aso *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wq_param param;
	int err;

	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;

	param.db_numa_node = numa_node;
	param.buf_numa_node = numa_node;
	err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
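	/* The doorbell record holds a receive and a send counter; a
	 * send-only queue updates the send half (MLX5_SND_DBR).
	 */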
	wq->db = &wq->db[MLX5_SND_DBR];

	return 0;
}

static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
			 void *sqc_data, struct mlx5_aso *sq)
{
	void *in, *sqc, *wq;
	int inlen, err;
	u8 ts_format;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);

	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

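	/* Match the SQ timestamp format to the device clock: real-time
	 * when the hardware clock runs in real-time mode, free-running
	 * otherwise.
	 */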
	ts_format = mlx5_is_real_time_sq(mdev) ?
			MLX5_TIMESTAMP_FORMAT_REAL_TIME :
			MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
	MLX5_SET(sqc, sqc, ts_format, ts_format);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}

static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
{
	void *in, *sqc;
	int inlen, err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

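	/* MODIFY_SQ takes the current state (RST) in sq_state and the
	 * target state (RDY) in the SQ context.
	 */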
	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	err = mlx5_core_modify_sq(mdev, sqn, in);

	kvfree(in);

	return err;
}

static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
				  void *sqc_data, struct mlx5_aso *sq)
{
	int err;

	err = create_aso_sq(mdev, pdn, sqc_data, sq);
	if (err)
		return err;

	err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
	if (err)
		mlx5_core_destroy_sq(mdev, sq->sqn);

	return err;
}

static void mlx5_aso_free_sq(struct mlx5_aso *sq)
{
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
{
	mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
	mlx5_aso_free_sq(sq);
}

static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
			      u32 pdn, struct mlx5_aso *sq)
{
	void *sqc_data, *wq;
	int err;

	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
	if (!sqc_data)
		return -ENOMEM;

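	/* Minimal cyclic work queue: one 64B WQEBB stride and two
	 * entries (log_wq_sz = 1).
	 */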
	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, pdn);
	MLX5_SET(wq, wq, log_wq_sz, 1);

	err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
		goto err_out;
	}

	err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
	if (err) {
		mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
		goto err_free_asosq;
	}

	mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);

	kvfree(sqc_data);
	return 0;

err_free_asosq:
	mlx5_aso_free_sq(sq);
err_out:
	kvfree(sqc_data);
	return err;
}

struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
{
	int numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	struct mlx5_aso *aso;
	int err;

	aso = kzalloc(sizeof(*aso), GFP_KERNEL);
	if (!aso)
		return ERR_PTR(-ENOMEM);

	err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
	if (err)
		goto err_cq;

	err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
	if (err)
		goto err_sq;

	return aso;

err_sq:
	mlx5_aso_destroy_cq(&aso->cq);
err_cq:
	kfree(aso);
	return ERR_PTR(err);
}

void mlx5_aso_destroy(struct mlx5_aso *aso)
{
	mlx5_aso_destroy_sq(aso);
	mlx5_aso_destroy_cq(&aso->cq);
	kfree(aso);
}

void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
			struct mlx5_aso_wqe *aso_wqe,
			u32 obj_id, u32 opc_mode)
{
	struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;

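	/* opmod_idx_opcode packs the ASO opcode modifier, the WQE index
	 * (the current producer counter) and the ACCESS_ASO opcode.
	 */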
	cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
					     (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
					     MLX5_OPCODE_ACCESS_ASO);
	cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->general_id = cpu_to_be32(obj_id);
}

struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
	struct mlx5_aso_wqe *wqe;
	u16 pi;

	pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
	wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi);
	memset(wqe, 0, sizeof(*wqe));
	return wqe;
}

void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
		       struct mlx5_wqe_ctrl_seg *doorbell_cseg)
{
	doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	if (with_data)
		aso->pc += MLX5_ASO_WQEBBS_DATA;
	else
		aso->pc += MLX5_ASO_WQEBBS;
	*aso->wq.db = cpu_to_be32(aso->pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);

	/* Ensure doorbell is written on uar_page before poll_cq */
	WRITE_ONCE(doorbell_cseg, NULL);
}

int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data)
{
	struct mlx5_aso_cq *cq = &aso->cq;
	struct mlx5_cqe64 *cqe;

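	/* Single non-sleeping poll: an empty CQ is reported as -ETIMEDOUT
	 * and the caller is expected to retry.
	 */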
	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return -ETIMEDOUT;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	mlx5_cqwq_pop(&cq->wq);

	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
		struct mlx5_err_cqe *err_cqe;

		mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
			      get_cqe_opcode(cqe));

		err_cqe = (struct mlx5_err_cqe *)cqe;
		mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
			      err_cqe->vendor_err_synd);
		mlx5_core_err(cq->mdev, "syndrome=%x\n",
			      err_cqe->syndrome);
		print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
			       16, 1, err_cqe,
			       sizeof(*err_cqe), false);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	if (with_data)
		aso->cc += MLX5_ASO_WQEBBS_DATA;
	else
		aso->cc += MLX5_ASO_WQEBBS;

	return 0;
}
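
/*
 * Illustrative usage sketch (an assumption for documentation, not part of
 * this file): a caller that owns an ASO object drives the SQ with the
 * helpers above.  "obj_id" and the MACsec opcode modifier below are
 * placeholder choices.
 *
 *	struct mlx5_aso_wqe *wqe;
 *	struct mlx5_aso *aso;
 *	int err;
 *
 *	aso = mlx5_aso_create(mdev, pdn);
 *	if (IS_ERR(aso))
 *		return PTR_ERR(aso);
 *
 *	wqe = mlx5_aso_get_wqe(aso);
 *	mlx5_aso_build_wqe(aso, DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS),
 *			   wqe, obj_id, MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
 *	mlx5_aso_post_wqe(aso, false, &wqe->ctrl);
 *
 *	err = mlx5_aso_poll_cq(aso, false);	retry with a delay on -ETIMEDOUT
 *
 *	mlx5_aso_destroy(aso);
 */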