// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#include <linux/dma-mapping.h>

#include "ionic_fw.h"
#include "ionic_ibdev.h"
8
ionic_v1_stat_normalize(struct ionic_v1_stat * hw_stats,int hw_stats_count)9 static int ionic_v1_stat_normalize(struct ionic_v1_stat *hw_stats,
10 int hw_stats_count)
11 {
12 int hw_stat_i;
13
14 for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
15 struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
16
17 stat->type_off = be32_to_cpu(stat->be_type_off);
18 stat->name[sizeof(stat->name) - 1] = 0;
19 if (ionic_v1_stat_type(stat) == IONIC_V1_STAT_TYPE_NONE)
20 break;
21 }
22
23 return hw_stat_i;
24 }
25
ionic_fill_stats_desc(struct rdma_stat_desc * hw_stats_hdrs,struct ionic_v1_stat * hw_stats,int hw_stats_count)26 static void ionic_fill_stats_desc(struct rdma_stat_desc *hw_stats_hdrs,
27 struct ionic_v1_stat *hw_stats,
28 int hw_stats_count)
29 {
30 int hw_stat_i;
31
32 for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
33 struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
34
35 hw_stats_hdrs[hw_stat_i].name = stat->name;
36 }
37 }
38
ionic_v1_stat_val(struct ionic_v1_stat * stat,void * vals_buf,size_t vals_len)39 static u64 ionic_v1_stat_val(struct ionic_v1_stat *stat,
40 void *vals_buf, size_t vals_len)
41 {
42 unsigned int off = ionic_v1_stat_off(stat);
43 int type = ionic_v1_stat_type(stat);
44
45 #define __ionic_v1_stat_validate(__type) \
46 ((off + sizeof(__type) <= vals_len) && \
47 (IS_ALIGNED(off, sizeof(__type))))
48
49 switch (type) {
50 case IONIC_V1_STAT_TYPE_8:
51 if (__ionic_v1_stat_validate(u8))
52 return *(u8 *)(vals_buf + off);
53 break;
54 case IONIC_V1_STAT_TYPE_LE16:
55 if (__ionic_v1_stat_validate(__le16))
56 return le16_to_cpu(*(__le16 *)(vals_buf + off));
57 break;
58 case IONIC_V1_STAT_TYPE_LE32:
59 if (__ionic_v1_stat_validate(__le32))
60 return le32_to_cpu(*(__le32 *)(vals_buf + off));
61 break;
62 case IONIC_V1_STAT_TYPE_LE64:
63 if (__ionic_v1_stat_validate(__le64))
64 return le64_to_cpu(*(__le64 *)(vals_buf + off));
65 break;
66 case IONIC_V1_STAT_TYPE_BE16:
67 if (__ionic_v1_stat_validate(__be16))
68 return be16_to_cpu(*(__be16 *)(vals_buf + off));
69 break;
70 case IONIC_V1_STAT_TYPE_BE32:
71 if (__ionic_v1_stat_validate(__be32))
72 return be32_to_cpu(*(__be32 *)(vals_buf + off));
73 break;
74 case IONIC_V1_STAT_TYPE_BE64:
75 if (__ionic_v1_stat_validate(__be64))
76 return be64_to_cpu(*(__be64 *)(vals_buf + off));
77 break;
78 }
79
80 return ~0ull;
81 #undef __ionic_v1_stat_validate
82 }
83
/*
 * ionic_hw_stats_cmd() - Issue a stats admin command to the device.
 * @dev:	RDMA device.
 * @dma:	DMA address of the buffer the device fills.
 * @len:	Length of the DMA buffer in bytes.
 * @qid:	Queue id for per-QP stats ops; 0 for device-wide ops.
 * @op:		Admin opcode (stats headers or values).
 *
 * Return: 0 on success, -EBADRQC if the firmware does not support @op,
 * or a negative error from the admin queue wait.
 */
static int ionic_hw_stats_cmd(struct ionic_ibdev *dev,
			      dma_addr_t dma, size_t len, int qid, int op)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = op,
			.len = cpu_to_le16(IONIC_ADMIN_STATS_HDRS_IN_V1_LEN),
			.cmd.stats = {
				.dma_addr = cpu_to_le64(dma),
				.length = cpu_to_le32(len),
				.id_ver = cpu_to_le32(qid),
			},
		}
	};

	/* Firmware advertises the number of admin opcodes it understands. */
	if (dev->lif_cfg.admin_opcodes <= op)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);

	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_INTERRUPT);
}
107
/*
 * ionic_init_hw_stats() - Discover and set up device-wide hw stats.
 * @dev:	RDMA device.
 *
 * Allocates a values buffer and a descriptor table, queries the device
 * for the stat names/types/offsets, normalizes the table, and builds the
 * rdma_stat_desc array used by alloc_hw_port_stats.  Idempotent: returns
 * immediately if already initialized.
 *
 * Return: 0 on success or a negative errno; on failure all state is
 * freed and reset so a later retry is safe.
 */
static int ionic_init_hw_stats(struct ionic_ibdev *dev)
{
	dma_addr_t hw_stats_dma;
	int rc, hw_stats_count;

	/* already initialized by a previous call */
	if (dev->hw_stats_hdrs)
		return 0;

	dev->hw_stats_count = 0;

	/* buffer for current values from the device */
	dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dev->hw_stats_buf) {
		rc = -ENOMEM;
		goto err_buf;
	}

	/* buffer for names, sizes, offsets of values */
	dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dev->hw_stats) {
		rc = -ENOMEM;
		goto err_hw_stats;
	}

	/* request the names, sizes, offsets */
	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
	if (rc)
		goto err_dma;

	rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0,
				IONIC_V1_ADMIN_STATS_HDRS);
	if (rc)
		goto err_cmd;

	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* normalize and count the number of hw_stats */
	hw_stats_count =
		ionic_v1_stat_normalize(dev->hw_stats,
					PAGE_SIZE / sizeof(*dev->hw_stats));
	if (!hw_stats_count) {
		/* buffer already unmapped above; err_dma only frees here */
		rc = -ENODATA;
		goto err_dma;
	}

	dev->hw_stats_count = hw_stats_count;

	/* alloc and init array of names, for alloc_hw_stats */
	dev->hw_stats_hdrs = kzalloc_objs(*dev->hw_stats_hdrs, hw_stats_count);
	if (!dev->hw_stats_hdrs) {
		rc = -ENOMEM;
		goto err_dma;
	}

	ionic_fill_stats_desc(dev->hw_stats_hdrs, dev->hw_stats,
			      hw_stats_count);

	return 0;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	kfree(dev->hw_stats);
err_hw_stats:
	kfree(dev->hw_stats_buf);
err_buf:
	/* reset everything so a later init attempt starts clean */
	dev->hw_stats_count = 0;
	dev->hw_stats = NULL;
	dev->hw_stats_buf = NULL;
	dev->hw_stats_hdrs = NULL;
	return rc;
}
182
/*
 * ionic_alloc_hw_stats() - alloc_hw_port_stats callback.
 * @ibdev:	IB device.
 * @port:	Port number; this device exposes only port 1.
 *
 * Return: a new rdma_hw_stats built from the descriptors discovered by
 * ionic_init_hw_stats(), or NULL for an invalid port.
 */
static struct rdma_hw_stats *ionic_alloc_hw_stats(struct ib_device *ibdev,
						  u32 port)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);

	if (port != 1)
		return NULL;

	return rdma_alloc_hw_stats_struct(dev->hw_stats_hdrs,
					  dev->hw_stats_count,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
195
/*
 * ionic_get_hw_stats() - get_hw_stats callback: refresh all port stats.
 * @ibdev:	IB device.
 * @hw_stats:	Destination stats structure to fill.
 * @port:	Port number; only port 1 is valid.
 * @index:	Unused; all counters are refreshed at once.
 *
 * Fetches the current raw values from the device into hw_stats_buf and
 * decodes each counter via its descriptor.
 *
 * Return: number of counters filled, or a negative errno.
 */
static int ionic_get_hw_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *hw_stats,
			      u32 port, int index)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
	dma_addr_t dma;
	int rc, i;

	if (port != 1)
		return -EINVAL;

	/* map the buffer the device will fill with current values */
	dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats_buf,
			     PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, dma);
	if (rc)
		return rc;

	rc = ionic_hw_stats_cmd(dev, dma, PAGE_SIZE,
				0, IONIC_V1_ADMIN_STATS_VALS);

	/* unmap on both the success and failure paths */
	dma_unmap_single(dev->lif_cfg.hwdev, dma,
			 PAGE_SIZE, DMA_FROM_DEVICE);

	if (rc)
		return rc;

	/* decode each value from the raw buffer */
	for (i = 0; i < dev->hw_stats_count; ++i)
		hw_stats->value[i] =
			ionic_v1_stat_val(&dev->hw_stats[i],
					  dev->hw_stats_buf, PAGE_SIZE);

	return i;
}
234
235 static struct rdma_hw_stats *
ionic_counter_alloc_stats(struct rdma_counter * counter)236 ionic_counter_alloc_stats(struct rdma_counter *counter)
237 {
238 struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
239 struct ionic_counter *cntr;
240 int err;
241
242 cntr = kzalloc_obj(*cntr);
243 if (!cntr)
244 return NULL;
245
246 /* buffer for current values from the device */
247 cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL);
248 if (!cntr->vals)
249 goto err_vals;
250
251 err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id,
252 cntr,
253 XA_LIMIT(0, IONIC_MAX_QPID),
254 GFP_KERNEL);
255 if (err)
256 goto err_xa;
257
258 INIT_LIST_HEAD(&cntr->qp_list);
259
260 return rdma_alloc_hw_stats_struct(dev->counter_stats->stats_hdrs,
261 dev->counter_stats->queue_stats_count,
262 RDMA_HW_STATS_DEFAULT_LIFESPAN);
263 err_xa:
264 kfree(cntr->vals);
265 err_vals:
266 kfree(cntr);
267
268 return NULL;
269 }
270
/*
 * ionic_counter_dealloc() - counter_dealloc callback.
 * @counter:	Counter being destroyed.
 *
 * Removes the tracking object from the xarray and frees it.
 *
 * Return: 0 on success, -EINVAL if the id was never registered.
 */
static int ionic_counter_dealloc(struct rdma_counter *counter)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
	struct ionic_counter *cntr;

	cntr = xa_erase(&dev->counter_stats->xa_counters, counter->id);
	if (!cntr)
		return -EINVAL;

	kfree(cntr->vals);
	kfree(cntr);

	return 0;
}
285
/*
 * ionic_counter_bind_qp() - counter_bind_qp callback.
 * @counter:	Counter to bind to.
 * @ibqp:	QP being bound.
 * @port:	Port number (unused).
 *
 * Adds the QP to the counter's list so ionic_get_qp_stats() can
 * aggregate over every bound QP.
 *
 * Return: 0 on success, -EINVAL for an unknown counter id.
 */
static int ionic_counter_bind_qp(struct rdma_counter *counter,
				 struct ib_qp *ibqp,
				 u32 port)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
	struct ionic_qp *qp = to_ionic_qp(ibqp);
	struct ionic_counter *cntr;

	cntr = xa_load(&dev->counter_stats->xa_counters, counter->id);
	if (!cntr)
		return -EINVAL;

	list_add_tail(&qp->qp_list_counter, &cntr->qp_list);
	ibqp->counter = counter;

	return 0;
}
303
/*
 * ionic_counter_unbind_qp() - counter_unbind_qp callback.
 * @ibqp:	QP being unbound.
 * @port:	Port number (unused).
 *
 * Removes the QP from its counter's list, if it is bound to one.
 *
 * Return: always 0.
 */
static int ionic_counter_unbind_qp(struct ib_qp *ibqp, u32 port)
{
	struct ionic_qp *qp = to_ionic_qp(ibqp);

	if (!ibqp->counter)
		return 0;

	list_del(&qp->qp_list_counter);
	ibqp->counter = NULL;

	return 0;
}
315
/*
 * ionic_get_qp_stats() - Aggregate per-QP stats for one counter.
 * @ibdev:	IB device.
 * @hw_stats:	Destination stats; each value is the sum over bound QPs.
 * @counter_id:	Id of the counter whose QP list is walked.
 *
 * Issues one per-QP stats-values command per bound QP into the
 * counter's values buffer and accumulates the decoded values.
 *
 * Return: number of stats filled (0 if no QPs are bound), or a
 * negative errno.
 */
static int ionic_get_qp_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *hw_stats,
			      u32 counter_id)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
	struct ionic_counter_stats *cs;
	struct ionic_counter *cntr;
	dma_addr_t hw_stats_dma;
	struct ionic_qp *qp;
	int rc, stat_i = 0;

	cs = dev->counter_stats;
	cntr = xa_load(&cs->xa_counters, counter_id);
	if (!cntr)
		return -EINVAL;

	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, cntr->vals,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
	if (rc)
		return rc;

	/* start from zero; values accumulate across the QP list below */
	memset(hw_stats->value, 0, sizeof(u64) * hw_stats->num_counters);

	list_for_each_entry(qp, &cntr->qp_list, qp_list_counter) {
		rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
					qp->qpid,
					IONIC_V1_ADMIN_QP_STATS_VALS);
		if (rc)
			goto err_cmd;

		/* sum this QP's values into the running totals */
		for (stat_i = 0; stat_i < cs->queue_stats_count; ++stat_i)
			hw_stats->value[stat_i] +=
				ionic_v1_stat_val(&cs->hdr[stat_i],
						  cntr->vals,
						  PAGE_SIZE);
	}

	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	return stat_i;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	return rc;
}
362
ionic_counter_update_stats(struct rdma_counter * counter)363 static int ionic_counter_update_stats(struct rdma_counter *counter)
364 {
365 return ionic_get_qp_stats(counter->device, counter->stats, counter->id);
366 }
367
/*
 * ionic_alloc_counters() - Discover and set up per-QP counter stats.
 * @dev:	RDMA device; dev->counter_stats must already be allocated.
 *
 * Queries the device for the per-QP stat descriptors, normalizes them,
 * and builds the rdma_stat_desc name array used when allocating
 * counter stats structures.
 *
 * Return: 0 on success or a negative errno; on failure the header
 * table is freed (the caller frees dev->counter_stats itself).
 */
static int ionic_alloc_counters(struct ionic_ibdev *dev)
{
	struct ionic_counter_stats *cs = dev->counter_stats;
	int rc, hw_stats_count;
	dma_addr_t hdr_dma;

	/* buffer for names, sizes, offsets of values */
	cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cs->hdr)
		return -ENOMEM;

	hdr_dma = dma_map_single(dev->lif_cfg.hwdev, cs->hdr,
				 PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
	if (rc)
		goto err_dma;

	rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0,
				IONIC_V1_ADMIN_QP_STATS_HDRS);
	if (rc)
		goto err_cmd;

	dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* normalize and count the number of hw_stats */
	hw_stats_count = ionic_v1_stat_normalize(cs->hdr,
						 PAGE_SIZE / sizeof(*cs->hdr));
	if (!hw_stats_count) {
		/* already unmapped above; err_dma only frees the header */
		rc = -ENODATA;
		goto err_dma;
	}

	cs->queue_stats_count = hw_stats_count;

	/* alloc and init array of names */
	cs->stats_hdrs = kzalloc_objs(*cs->stats_hdrs, hw_stats_count);
	if (!cs->stats_hdrs) {
		rc = -ENOMEM;
		goto err_dma;
	}

	ionic_fill_stats_desc(cs->stats_hdrs, cs->hdr, hw_stats_count);

	return 0;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	kfree(cs->hdr);

	return rc;
}
420
/* Device-wide (per-port) stats ops, registered when global stats work. */
static const struct ib_device_ops ionic_hw_stats_ops = {
	.driver_id = RDMA_DRIVER_IONIC,
	.alloc_hw_port_stats = ionic_alloc_hw_stats,
	.get_hw_stats = ionic_get_hw_stats,
};
426
/* Per-QP counter stats ops, registered when QP stats are supported. */
static const struct ib_device_ops ionic_counter_stats_ops = {
	.counter_alloc_stats = ionic_counter_alloc_stats,
	.counter_dealloc = ionic_counter_dealloc,
	.counter_bind_qp = ionic_counter_bind_qp,
	.counter_unbind_qp = ionic_counter_unbind_qp,
	.counter_update_stats = ionic_counter_update_stats,
};
434
ionic_stats_init(struct ionic_ibdev * dev)435 void ionic_stats_init(struct ionic_ibdev *dev)
436 {
437 u16 stats_type = dev->lif_cfg.stats_type;
438 int rc;
439
440 if (stats_type & IONIC_LIF_RDMA_STAT_GLOBAL) {
441 rc = ionic_init_hw_stats(dev);
442 if (rc)
443 ibdev_dbg(&dev->ibdev, "Failed to init hw stats\n");
444 else
445 ib_set_device_ops(&dev->ibdev, &ionic_hw_stats_ops);
446 }
447
448 if (stats_type & IONIC_LIF_RDMA_STAT_QP) {
449 dev->counter_stats = kzalloc_obj(*dev->counter_stats);
450 if (!dev->counter_stats)
451 return;
452
453 rc = ionic_alloc_counters(dev);
454 if (rc) {
455 ibdev_dbg(&dev->ibdev, "Failed to init counter stats\n");
456 kfree(dev->counter_stats);
457 dev->counter_stats = NULL;
458 return;
459 }
460
461 xa_init_flags(&dev->counter_stats->xa_counters, XA_FLAGS_ALLOC);
462
463 ib_set_device_ops(&dev->ibdev, &ionic_counter_stats_ops);
464 }
465 }
466
ionic_stats_cleanup(struct ionic_ibdev * dev)467 void ionic_stats_cleanup(struct ionic_ibdev *dev)
468 {
469 if (dev->counter_stats) {
470 xa_destroy(&dev->counter_stats->xa_counters);
471 kfree(dev->counter_stats->hdr);
472 kfree(dev->counter_stats->stats_hdrs);
473 kfree(dev->counter_stats);
474 dev->counter_stats = NULL;
475 }
476
477 kfree(dev->hw_stats);
478 kfree(dev->hw_stats_buf);
479 kfree(dev->hw_stats_hdrs);
480 }
481