xref: /linux/drivers/infiniband/hw/ionic/ionic_hw_stats.c (revision 6093a688a07da07808f0122f9aa2a3eed250d853)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
3 
4 #include <linux/dma-mapping.h>
5 
6 #include "ionic_fw.h"
7 #include "ionic_ibdev.h"
8 
9 static int ionic_v1_stat_normalize(struct ionic_v1_stat *hw_stats,
10 				   int hw_stats_count)
11 {
12 	int hw_stat_i;
13 
14 	for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
15 		struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
16 
17 		stat->type_off = be32_to_cpu(stat->be_type_off);
18 		stat->name[sizeof(stat->name) - 1] = 0;
19 		if (ionic_v1_stat_type(stat) == IONIC_V1_STAT_TYPE_NONE)
20 			break;
21 	}
22 
23 	return hw_stat_i;
24 }
25 
26 static void ionic_fill_stats_desc(struct rdma_stat_desc *hw_stats_hdrs,
27 				  struct ionic_v1_stat *hw_stats,
28 				  int hw_stats_count)
29 {
30 	int hw_stat_i;
31 
32 	for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
33 		struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
34 
35 		hw_stats_hdrs[hw_stat_i].name = stat->name;
36 	}
37 }
38 
39 static u64 ionic_v1_stat_val(struct ionic_v1_stat *stat,
40 			     void *vals_buf, size_t vals_len)
41 {
42 	unsigned int off = ionic_v1_stat_off(stat);
43 	int type = ionic_v1_stat_type(stat);
44 
45 #define __ionic_v1_stat_validate(__type)		\
46 	((off + sizeof(__type) <= vals_len) &&		\
47 	 (IS_ALIGNED(off, sizeof(__type))))
48 
49 	switch (type) {
50 	case IONIC_V1_STAT_TYPE_8:
51 		if (__ionic_v1_stat_validate(u8))
52 			return *(u8 *)(vals_buf + off);
53 		break;
54 	case IONIC_V1_STAT_TYPE_LE16:
55 		if (__ionic_v1_stat_validate(__le16))
56 			return le16_to_cpu(*(__le16 *)(vals_buf + off));
57 		break;
58 	case IONIC_V1_STAT_TYPE_LE32:
59 		if (__ionic_v1_stat_validate(__le32))
60 			return le32_to_cpu(*(__le32 *)(vals_buf + off));
61 		break;
62 	case IONIC_V1_STAT_TYPE_LE64:
63 		if (__ionic_v1_stat_validate(__le64))
64 			return le64_to_cpu(*(__le64 *)(vals_buf + off));
65 		break;
66 	case IONIC_V1_STAT_TYPE_BE16:
67 		if (__ionic_v1_stat_validate(__be16))
68 			return be16_to_cpu(*(__be16 *)(vals_buf + off));
69 		break;
70 	case IONIC_V1_STAT_TYPE_BE32:
71 		if (__ionic_v1_stat_validate(__be32))
72 			return be32_to_cpu(*(__be32 *)(vals_buf + off));
73 		break;
74 	case IONIC_V1_STAT_TYPE_BE64:
75 		if (__ionic_v1_stat_validate(__be64))
76 			return be64_to_cpu(*(__be64 *)(vals_buf + off));
77 		break;
78 	}
79 
80 	return ~0ull;
81 #undef __ionic_v1_stat_validate
82 }
83 
84 static int ionic_hw_stats_cmd(struct ionic_ibdev *dev,
85 			      dma_addr_t dma, size_t len, int qid, int op)
86 {
87 	struct ionic_admin_wr wr = {
88 		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
89 		.wqe = {
90 			.op = op,
91 			.len = cpu_to_le16(IONIC_ADMIN_STATS_HDRS_IN_V1_LEN),
92 			.cmd.stats = {
93 				.dma_addr = cpu_to_le64(dma),
94 				.length = cpu_to_le32(len),
95 				.id_ver = cpu_to_le32(qid),
96 			},
97 		}
98 	};
99 
100 	if (dev->lif_cfg.admin_opcodes <= op)
101 		return -EBADRQC;
102 
103 	ionic_admin_post(dev, &wr);
104 
105 	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_INTERRUPT);
106 }
107 
/*
 * One-time discovery of the device's global hw stats: fetch the table of
 * stat headers (names/types/offsets) from firmware and build the
 * rdma_stat_desc array used by ionic_alloc_hw_stats().  Returns 0 or a
 * negative errno; on failure all stats state is reset to NULL/0 so a
 * later call can retry cleanly.
 */
static int ionic_init_hw_stats(struct ionic_ibdev *dev)
{
	dma_addr_t hw_stats_dma;
	int rc, hw_stats_count;

	/* already initialized by a previous call */
	if (dev->hw_stats_hdrs)
		return 0;

	dev->hw_stats_count = 0;

	/* buffer for current values from the device */
	dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dev->hw_stats_buf) {
		rc = -ENOMEM;
		goto err_buf;
	}

	/* buffer for names, sizes, offsets of values */
	dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dev->hw_stats) {
		rc = -ENOMEM;
		goto err_hw_stats;
	}

	/* request the names, sizes, offsets */
	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
	if (rc)
		goto err_dma;

	rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0,
				IONIC_V1_ADMIN_STATS_HDRS);
	if (rc)
		goto err_cmd;

	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* normalize and count the number of hw_stats */
	hw_stats_count =
		ionic_v1_stat_normalize(dev->hw_stats,
					PAGE_SIZE / sizeof(*dev->hw_stats));
	if (!hw_stats_count) {
		rc = -ENODATA;
		/* buffer already unmapped; err_dma only frees allocations */
		goto err_dma;
	}

	dev->hw_stats_count = hw_stats_count;

	/* alloc and init array of names, for alloc_hw_stats */
	dev->hw_stats_hdrs = kcalloc(hw_stats_count,
				     sizeof(*dev->hw_stats_hdrs),
				     GFP_KERNEL);
	if (!dev->hw_stats_hdrs) {
		rc = -ENOMEM;
		goto err_dma;
	}

	ionic_fill_stats_desc(dev->hw_stats_hdrs, dev->hw_stats,
			      hw_stats_count);

	return 0;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	/* despite the name, this label frees buffers, not the DMA mapping */
	kfree(dev->hw_stats);
err_hw_stats:
	kfree(dev->hw_stats_buf);
err_buf:
	/* reset all state so the next init attempt starts from scratch */
	dev->hw_stats_count = 0;
	dev->hw_stats = NULL;
	dev->hw_stats_buf = NULL;
	dev->hw_stats_hdrs = NULL;
	return rc;
}
184 
185 static struct rdma_hw_stats *ionic_alloc_hw_stats(struct ib_device *ibdev,
186 						  u32 port)
187 {
188 	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
189 
190 	if (port != 1)
191 		return NULL;
192 
193 	return rdma_alloc_hw_stats_struct(dev->hw_stats_hdrs,
194 					  dev->hw_stats_count,
195 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
196 }
197 
198 static int ionic_get_hw_stats(struct ib_device *ibdev,
199 			      struct rdma_hw_stats *hw_stats,
200 			      u32 port, int index)
201 {
202 	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
203 	dma_addr_t hw_stats_dma;
204 	int rc, hw_stat_i;
205 
206 	if (port != 1)
207 		return -EINVAL;
208 
209 	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats_buf,
210 				      PAGE_SIZE, DMA_FROM_DEVICE);
211 	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
212 	if (rc)
213 		goto err_dma;
214 
215 	rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
216 				0, IONIC_V1_ADMIN_STATS_VALS);
217 	if (rc)
218 		goto err_cmd;
219 
220 	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
221 			 PAGE_SIZE, DMA_FROM_DEVICE);
222 
223 	for (hw_stat_i = 0; hw_stat_i < dev->hw_stats_count; ++hw_stat_i)
224 		hw_stats->value[hw_stat_i] =
225 			ionic_v1_stat_val(&dev->hw_stats[hw_stat_i],
226 					  dev->hw_stats_buf, PAGE_SIZE);
227 
228 	return hw_stat_i;
229 
230 err_cmd:
231 	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
232 			 PAGE_SIZE, DMA_FROM_DEVICE);
233 err_dma:
234 	return rc;
235 }
236 
237 static struct rdma_hw_stats *
238 ionic_counter_alloc_stats(struct rdma_counter *counter)
239 {
240 	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
241 	struct ionic_counter *cntr;
242 	int err;
243 
244 	cntr = kzalloc(sizeof(*cntr), GFP_KERNEL);
245 	if (!cntr)
246 		return NULL;
247 
248 	/* buffer for current values from the device */
249 	cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL);
250 	if (!cntr->vals)
251 		goto err_vals;
252 
253 	err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id,
254 		       cntr,
255 		       XA_LIMIT(0, IONIC_MAX_QPID),
256 		       GFP_KERNEL);
257 	if (err)
258 		goto err_xa;
259 
260 	INIT_LIST_HEAD(&cntr->qp_list);
261 
262 	return rdma_alloc_hw_stats_struct(dev->counter_stats->stats_hdrs,
263 					 dev->counter_stats->queue_stats_count,
264 					 RDMA_HW_STATS_DEFAULT_LIFESPAN);
265 err_xa:
266 	kfree(cntr->vals);
267 err_vals:
268 	kfree(cntr);
269 
270 	return NULL;
271 }
272 
273 static int ionic_counter_dealloc(struct rdma_counter *counter)
274 {
275 	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
276 	struct ionic_counter *cntr;
277 
278 	cntr = xa_erase(&dev->counter_stats->xa_counters, counter->id);
279 	if (!cntr)
280 		return -EINVAL;
281 
282 	kfree(cntr->vals);
283 	kfree(cntr);
284 
285 	return 0;
286 }
287 
288 static int ionic_counter_bind_qp(struct rdma_counter *counter,
289 				 struct ib_qp *ibqp,
290 				 u32 port)
291 {
292 	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
293 	struct ionic_qp *qp = to_ionic_qp(ibqp);
294 	struct ionic_counter *cntr;
295 
296 	cntr = xa_load(&dev->counter_stats->xa_counters, counter->id);
297 	if (!cntr)
298 		return -EINVAL;
299 
300 	list_add_tail(&qp->qp_list_counter, &cntr->qp_list);
301 	ibqp->counter = counter;
302 
303 	return 0;
304 }
305 
306 static int ionic_counter_unbind_qp(struct ib_qp *ibqp, u32 port)
307 {
308 	struct ionic_qp *qp = to_ionic_qp(ibqp);
309 
310 	if (ibqp->counter) {
311 		list_del(&qp->qp_list_counter);
312 		ibqp->counter = NULL;
313 	}
314 
315 	return 0;
316 }
317 
/*
 * Sum the per-QP hw stats of every QP bound to @counter_id into
 * hw_stats->value[].  One admin command is issued per bound QP, each
 * overwriting the counter's staging buffer (cntr->vals) with that QP's
 * values before they are accumulated.  Returns the number of counters
 * written, or a negative errno.
 */
static int ionic_get_qp_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *hw_stats,
			      u32 counter_id)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
	struct ionic_counter_stats *cs;
	struct ionic_counter *cntr;
	dma_addr_t hw_stats_dma;
	struct ionic_qp *qp;
	int rc, stat_i = 0;

	cs = dev->counter_stats;
	cntr = xa_load(&cs->xa_counters, counter_id);
	if (!cntr)
		return -EINVAL;

	/* map the staging buffer once for all per-QP commands */
	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, cntr->vals,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
	if (rc)
		goto err_dma;

	/* values are accumulated across QPs, so start from zero */
	memset(hw_stats->value, 0, sizeof(u64) * hw_stats->num_counters);

	list_for_each_entry(qp, &cntr->qp_list, qp_list_counter) {
		rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
					qp->qpid,
					IONIC_V1_ADMIN_QP_STATS_VALS);
		if (rc)
			goto err_cmd;

		/* NOTE(review): cntr->vals is read by the CPU while still
		 * DMA-mapped FROM_DEVICE; presumably the awaited admin
		 * completion guarantees coherence here — confirm whether a
		 * dma_sync_single_for_cpu() is needed on non-coherent
		 * platforms.
		 */
		for (stat_i = 0; stat_i < cs->queue_stats_count; ++stat_i)
			hw_stats->value[stat_i] +=
				ionic_v1_stat_val(&cs->hdr[stat_i],
						  cntr->vals,
						  PAGE_SIZE);
	}

	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	return stat_i;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	return rc;
}
364 
/* Refresh counter->stats with the summed values of all bound QPs. */
static int ionic_counter_update_stats(struct rdma_counter *counter)
{
	return ionic_get_qp_stats(counter->device, counter->stats, counter->id);
}
369 
/*
 * Fetch the per-QP stats header table from firmware into cs->hdr and
 * build the rdma_stat_desc array used by counter_alloc_stats.  Returns
 * 0 or a negative errno.  On failure cs->hdr is freed here; the caller
 * (ionic_stats_init) frees dev->counter_stats itself, so the stale
 * cs->hdr pointer is never seen by ionic_stats_cleanup().
 */
static int ionic_alloc_counters(struct ionic_ibdev *dev)
{
	struct ionic_counter_stats *cs = dev->counter_stats;
	int rc, hw_stats_count;
	dma_addr_t hdr_dma;

	/* buffer for names, sizes, offsets of values */
	cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cs->hdr)
		return -ENOMEM;

	hdr_dma = dma_map_single(dev->lif_cfg.hwdev, cs->hdr,
				 PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
	if (rc)
		goto err_dma;

	rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0,
				IONIC_V1_ADMIN_QP_STATS_HDRS);
	if (rc)
		goto err_cmd;

	dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* normalize and count the number of hw_stats */
	hw_stats_count = ionic_v1_stat_normalize(cs->hdr,
						 PAGE_SIZE / sizeof(*cs->hdr));
	if (!hw_stats_count) {
		rc = -ENODATA;
		/* already unmapped; err_dma only frees the header buffer */
		goto err_dma;
	}

	cs->queue_stats_count = hw_stats_count;

	/* alloc and init array of names */
	cs->stats_hdrs = kcalloc(hw_stats_count, sizeof(*cs->stats_hdrs),
				 GFP_KERNEL);
	if (!cs->stats_hdrs) {
		rc = -ENOMEM;
		goto err_dma;
	}

	ionic_fill_stats_desc(cs->stats_hdrs, cs->hdr, hw_stats_count);

	return 0;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	/* despite the name, this label frees the buffer, not the mapping */
	kfree(cs->hdr);

	return rc;
}
423 
/* Port-level (device-global) hw stats hooks; installed by
 * ionic_stats_init() when the LIF reports IONIC_LIF_RDMA_STAT_GLOBAL.
 */
static const struct ib_device_ops ionic_hw_stats_ops = {
	.driver_id = RDMA_DRIVER_IONIC,
	.alloc_hw_port_stats = ionic_alloc_hw_stats,
	.get_hw_stats = ionic_get_hw_stats,
};
429 
/* Per-QP rdma_counter hooks; installed by ionic_stats_init() when the
 * LIF reports IONIC_LIF_RDMA_STAT_QP.
 */
static const struct ib_device_ops ionic_counter_stats_ops = {
	.counter_alloc_stats = ionic_counter_alloc_stats,
	.counter_dealloc = ionic_counter_dealloc,
	.counter_bind_qp = ionic_counter_bind_qp,
	.counter_unbind_qp = ionic_counter_unbind_qp,
	.counter_update_stats = ionic_counter_update_stats,
};
437 
438 void ionic_stats_init(struct ionic_ibdev *dev)
439 {
440 	u16 stats_type = dev->lif_cfg.stats_type;
441 	int rc;
442 
443 	if (stats_type & IONIC_LIF_RDMA_STAT_GLOBAL) {
444 		rc = ionic_init_hw_stats(dev);
445 		if (rc)
446 			ibdev_dbg(&dev->ibdev, "Failed to init hw stats\n");
447 		else
448 			ib_set_device_ops(&dev->ibdev, &ionic_hw_stats_ops);
449 	}
450 
451 	if (stats_type & IONIC_LIF_RDMA_STAT_QP) {
452 		dev->counter_stats = kzalloc(sizeof(*dev->counter_stats),
453 					     GFP_KERNEL);
454 		if (!dev->counter_stats)
455 			return;
456 
457 		rc = ionic_alloc_counters(dev);
458 		if (rc) {
459 			ibdev_dbg(&dev->ibdev, "Failed to init counter stats\n");
460 			kfree(dev->counter_stats);
461 			dev->counter_stats = NULL;
462 			return;
463 		}
464 
465 		xa_init_flags(&dev->counter_stats->xa_counters, XA_FLAGS_ALLOC);
466 
467 		ib_set_device_ops(&dev->ibdev, &ionic_counter_stats_ops);
468 	}
469 }
470 
471 void ionic_stats_cleanup(struct ionic_ibdev *dev)
472 {
473 	if (dev->counter_stats) {
474 		xa_destroy(&dev->counter_stats->xa_counters);
475 		kfree(dev->counter_stats->hdr);
476 		kfree(dev->counter_stats->stats_hdrs);
477 		kfree(dev->counter_stats);
478 		dev->counter_stats = NULL;
479 	}
480 
481 	kfree(dev->hw_stats);
482 	kfree(dev->hw_stats_buf);
483 	kfree(dev->hw_stats_hdrs);
484 }
485