xref: /linux/drivers/infiniband/hw/ionic/ionic_hw_stats.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
3 
4 #include <linux/dma-mapping.h>
5 
6 #include "ionic_fw.h"
7 #include "ionic_ibdev.h"
8 
9 static int ionic_v1_stat_normalize(struct ionic_v1_stat *hw_stats,
10 				   int hw_stats_count)
11 {
12 	int hw_stat_i;
13 
14 	for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
15 		struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
16 
17 		stat->type_off = be32_to_cpu(stat->be_type_off);
18 		stat->name[sizeof(stat->name) - 1] = 0;
19 		if (ionic_v1_stat_type(stat) == IONIC_V1_STAT_TYPE_NONE)
20 			break;
21 	}
22 
23 	return hw_stat_i;
24 }
25 
26 static void ionic_fill_stats_desc(struct rdma_stat_desc *hw_stats_hdrs,
27 				  struct ionic_v1_stat *hw_stats,
28 				  int hw_stats_count)
29 {
30 	int hw_stat_i;
31 
32 	for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
33 		struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
34 
35 		hw_stats_hdrs[hw_stat_i].name = stat->name;
36 	}
37 }
38 
39 static u64 ionic_v1_stat_val(struct ionic_v1_stat *stat,
40 			     void *vals_buf, size_t vals_len)
41 {
42 	unsigned int off = ionic_v1_stat_off(stat);
43 	int type = ionic_v1_stat_type(stat);
44 
45 #define __ionic_v1_stat_validate(__type)		\
46 	((off + sizeof(__type) <= vals_len) &&		\
47 	 (IS_ALIGNED(off, sizeof(__type))))
48 
49 	switch (type) {
50 	case IONIC_V1_STAT_TYPE_8:
51 		if (__ionic_v1_stat_validate(u8))
52 			return *(u8 *)(vals_buf + off);
53 		break;
54 	case IONIC_V1_STAT_TYPE_LE16:
55 		if (__ionic_v1_stat_validate(__le16))
56 			return le16_to_cpu(*(__le16 *)(vals_buf + off));
57 		break;
58 	case IONIC_V1_STAT_TYPE_LE32:
59 		if (__ionic_v1_stat_validate(__le32))
60 			return le32_to_cpu(*(__le32 *)(vals_buf + off));
61 		break;
62 	case IONIC_V1_STAT_TYPE_LE64:
63 		if (__ionic_v1_stat_validate(__le64))
64 			return le64_to_cpu(*(__le64 *)(vals_buf + off));
65 		break;
66 	case IONIC_V1_STAT_TYPE_BE16:
67 		if (__ionic_v1_stat_validate(__be16))
68 			return be16_to_cpu(*(__be16 *)(vals_buf + off));
69 		break;
70 	case IONIC_V1_STAT_TYPE_BE32:
71 		if (__ionic_v1_stat_validate(__be32))
72 			return be32_to_cpu(*(__be32 *)(vals_buf + off));
73 		break;
74 	case IONIC_V1_STAT_TYPE_BE64:
75 		if (__ionic_v1_stat_validate(__be64))
76 			return be64_to_cpu(*(__be64 *)(vals_buf + off));
77 		break;
78 	}
79 
80 	return ~0ull;
81 #undef __ionic_v1_stat_validate
82 }
83 
/* Issue a stats admin command to the device.
 *
 * @dev: ionic RDMA device
 * @dma: DMA address of the buffer the device fills (or reads)
 * @len: length of that buffer in bytes
 * @qid: queue id (0 for device-global stats)
 * @op:  admin opcode (one of the IONIC_V1_ADMIN_*STATS* ops)
 *
 * Returns 0 on success, -EBADRQC if the device firmware does not
 * support the opcode, or a negative errno from the admin queue wait.
 */
static int ionic_hw_stats_cmd(struct ionic_ibdev *dev,
			      dma_addr_t dma, size_t len, int qid, int op)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = op,
			/* same fixed wqe length for all stats ops */
			.len = cpu_to_le16(IONIC_ADMIN_STATS_HDRS_IN_V1_LEN),
			.cmd.stats = {
				.dma_addr = cpu_to_le64(dma),
				.length = cpu_to_le32(len),
				.id_ver = cpu_to_le32(qid),
			},
		}
	};

	/* Reject opcodes beyond what this device firmware advertises. */
	if (dev->lif_cfg.admin_opcodes <= op)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);

	/* NOTE(review): F_INTERRUPT presumably makes the completion wait
	 * interruptible — confirm against ionic_admin_wait().
	 */
	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_INTERRUPT);
}
107 
/* One-time setup of the device-global hw stats state.
 *
 * Allocates a values buffer, queries the device for the table of stat
 * descriptors (names, types, offsets), counts the valid entries, and
 * builds the rdma_stat_desc array consumed by alloc_hw_port_stats.
 * Idempotent: returns 0 immediately if already initialized.
 *
 * Returns 0 on success or a negative errno; on failure all stats state
 * is reset to NULL/0 so a later retry starts clean.
 */
static int ionic_init_hw_stats(struct ionic_ibdev *dev)
{
	dma_addr_t hw_stats_dma;
	int rc, hw_stats_count;

	if (dev->hw_stats_hdrs)
		return 0;

	dev->hw_stats_count = 0;

	/* buffer for current values from the device */
	dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dev->hw_stats_buf) {
		rc = -ENOMEM;
		goto err_buf;
	}

	/* buffer for names, sizes, offsets of values */
	dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dev->hw_stats) {
		rc = -ENOMEM;
		goto err_hw_stats;
	}

	/* request the names, sizes, offsets */
	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
	if (rc)
		goto err_dma;

	rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0,
				IONIC_V1_ADMIN_STATS_HDRS);
	if (rc)
		goto err_cmd;

	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* normalize and count the number of hw_stats */
	hw_stats_count =
		ionic_v1_stat_normalize(dev->hw_stats,
					PAGE_SIZE / sizeof(*dev->hw_stats));
	if (!hw_stats_count) {
		rc = -ENODATA;
		goto err_dma;
	}

	dev->hw_stats_count = hw_stats_count;

	/* alloc and init array of names, for alloc_hw_stats */
	dev->hw_stats_hdrs = kzalloc_objs(*dev->hw_stats_hdrs, hw_stats_count,
					  GFP_KERNEL);
	if (!dev->hw_stats_hdrs) {
		rc = -ENOMEM;
		goto err_dma;
	}

	ionic_fill_stats_desc(dev->hw_stats_hdrs, dev->hw_stats,
			      hw_stats_count);

	return 0;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	/* the buffer is already unmapped when err_dma is reached directly */
	kfree(dev->hw_stats);
err_hw_stats:
	kfree(dev->hw_stats_buf);
err_buf:
	dev->hw_stats_count = 0;
	dev->hw_stats = NULL;
	dev->hw_stats_buf = NULL;
	dev->hw_stats_hdrs = NULL;
	return rc;
}
183 
184 static struct rdma_hw_stats *ionic_alloc_hw_stats(struct ib_device *ibdev,
185 						  u32 port)
186 {
187 	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
188 
189 	if (port != 1)
190 		return NULL;
191 
192 	return rdma_alloc_hw_stats_struct(dev->hw_stats_hdrs,
193 					  dev->hw_stats_count,
194 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
195 }
196 
/* ib_device_ops.get_hw_stats: refresh all device-global counters.
 *
 * Fetches the current raw values from the device into hw_stats_buf,
 * then decodes each value per its descriptor.  Only port 1 is valid.
 *
 * Returns the number of counters filled, or a negative errno.
 */
static int ionic_get_hw_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *hw_stats,
			      u32 port, int index)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
	dma_addr_t hw_stats_dma;
	int rc, hw_stat_i;

	if (port != 1)
		return -EINVAL;

	/* map the values buffer for the device to fill */
	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats_buf,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
	if (rc)
		goto err_dma;

	rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
				0, IONIC_V1_ADMIN_STATS_VALS);
	if (rc)
		goto err_cmd;

	/* unmap before the CPU reads the values */
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
			 PAGE_SIZE, DMA_FROM_DEVICE);

	/* decode each raw value per its descriptor */
	for (hw_stat_i = 0; hw_stat_i < dev->hw_stats_count; ++hw_stat_i)
		hw_stats->value[hw_stat_i] =
			ionic_v1_stat_val(&dev->hw_stats[hw_stat_i],
					  dev->hw_stats_buf, PAGE_SIZE);

	/* hw_stat_i == dev->hw_stats_count here */
	return hw_stat_i;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
			 PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	return rc;
}
235 
236 static struct rdma_hw_stats *
237 ionic_counter_alloc_stats(struct rdma_counter *counter)
238 {
239 	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
240 	struct ionic_counter *cntr;
241 	int err;
242 
243 	cntr = kzalloc_obj(*cntr, GFP_KERNEL);
244 	if (!cntr)
245 		return NULL;
246 
247 	/* buffer for current values from the device */
248 	cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL);
249 	if (!cntr->vals)
250 		goto err_vals;
251 
252 	err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id,
253 		       cntr,
254 		       XA_LIMIT(0, IONIC_MAX_QPID),
255 		       GFP_KERNEL);
256 	if (err)
257 		goto err_xa;
258 
259 	INIT_LIST_HEAD(&cntr->qp_list);
260 
261 	return rdma_alloc_hw_stats_struct(dev->counter_stats->stats_hdrs,
262 					 dev->counter_stats->queue_stats_count,
263 					 RDMA_HW_STATS_DEFAULT_LIFESPAN);
264 err_xa:
265 	kfree(cntr->vals);
266 err_vals:
267 	kfree(cntr);
268 
269 	return NULL;
270 }
271 
272 static int ionic_counter_dealloc(struct rdma_counter *counter)
273 {
274 	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
275 	struct ionic_counter *cntr;
276 
277 	cntr = xa_erase(&dev->counter_stats->xa_counters, counter->id);
278 	if (!cntr)
279 		return -EINVAL;
280 
281 	kfree(cntr->vals);
282 	kfree(cntr);
283 
284 	return 0;
285 }
286 
287 static int ionic_counter_bind_qp(struct rdma_counter *counter,
288 				 struct ib_qp *ibqp,
289 				 u32 port)
290 {
291 	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
292 	struct ionic_qp *qp = to_ionic_qp(ibqp);
293 	struct ionic_counter *cntr;
294 
295 	cntr = xa_load(&dev->counter_stats->xa_counters, counter->id);
296 	if (!cntr)
297 		return -EINVAL;
298 
299 	list_add_tail(&qp->qp_list_counter, &cntr->qp_list);
300 	ibqp->counter = counter;
301 
302 	return 0;
303 }
304 
305 static int ionic_counter_unbind_qp(struct ib_qp *ibqp, u32 port)
306 {
307 	struct ionic_qp *qp = to_ionic_qp(ibqp);
308 
309 	if (ibqp->counter) {
310 		list_del(&qp->qp_list_counter);
311 		ibqp->counter = NULL;
312 	}
313 
314 	return 0;
315 }
316 
/* Aggregate per-QP stats for one counter.
 *
 * For each QP bound to the counter, fetches that QP's raw values from
 * the device and adds the decoded values into hw_stats->value[].
 *
 * Returns the number of stats summed (0 if no QPs are bound), or a
 * negative errno.
 */
static int ionic_get_qp_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *hw_stats,
			      u32 counter_id)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
	struct ionic_counter_stats *cs;
	struct ionic_counter *cntr;
	dma_addr_t hw_stats_dma;
	struct ionic_qp *qp;
	int rc, stat_i = 0;

	cs = dev->counter_stats;
	cntr = xa_load(&cs->xa_counters, counter_id);
	if (!cntr)
		return -EINVAL;

	/* one mapping is reused for every QP's query below */
	hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, cntr->vals,
				      PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
	if (rc)
		return rc;

	/* start from zero; values accumulate across all bound QPs */
	memset(hw_stats->value, 0, sizeof(u64) * hw_stats->num_counters);

	list_for_each_entry(qp, &cntr->qp_list, qp_list_counter) {
		rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
					qp->qpid,
					IONIC_V1_ADMIN_QP_STATS_VALS);
		if (rc)
			goto err_cmd;

		/* NOTE(review): decoding while the buffer is still mapped
		 * FROM_DEVICE — presumably coherent on this platform;
		 * confirm against the DMA API expectations.
		 */
		for (stat_i = 0; stat_i < cs->queue_stats_count; ++stat_i)
			hw_stats->value[stat_i] +=
				ionic_v1_stat_val(&cs->hdr[stat_i],
						  cntr->vals,
						  PAGE_SIZE);
	}

	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	return stat_i;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	return rc;
}
363 
364 static int ionic_counter_update_stats(struct rdma_counter *counter)
365 {
366 	return ionic_get_qp_stats(counter->device, counter->stats, counter->id);
367 }
368 
/* Set up the per-QP counter descriptor state in dev->counter_stats.
 *
 * Queries the device for the table of per-QP stat descriptors, counts
 * the valid entries, and builds the rdma_stat_desc array used when
 * allocating counter stats structures.
 *
 * Returns 0 on success or a negative errno; on failure cs->hdr is
 * freed (the caller discards dev->counter_stats entirely).
 */
static int ionic_alloc_counters(struct ionic_ibdev *dev)
{
	struct ionic_counter_stats *cs = dev->counter_stats;
	int rc, hw_stats_count;
	dma_addr_t hdr_dma;

	/* buffer for names, sizes, offsets of values */
	cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cs->hdr)
		return -ENOMEM;

	hdr_dma = dma_map_single(dev->lif_cfg.hwdev, cs->hdr,
				 PAGE_SIZE, DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
	if (rc)
		goto err_dma;

	rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0,
				IONIC_V1_ADMIN_QP_STATS_HDRS);
	if (rc)
		goto err_cmd;

	dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* normalize and count the number of hw_stats */
	hw_stats_count = ionic_v1_stat_normalize(cs->hdr,
						 PAGE_SIZE / sizeof(*cs->hdr));
	if (!hw_stats_count) {
		rc = -ENODATA;
		goto err_dma;
	}

	cs->queue_stats_count = hw_stats_count;

	/* alloc and init array of names */
	cs->stats_hdrs = kzalloc_objs(*cs->stats_hdrs, hw_stats_count,
				      GFP_KERNEL);
	if (!cs->stats_hdrs) {
		rc = -ENOMEM;
		goto err_dma;
	}

	ionic_fill_stats_desc(cs->stats_hdrs, cs->hdr, hw_stats_count);

	return 0;

err_cmd:
	dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
	/* the buffer is already unmapped when err_dma is reached directly */
	kfree(cs->hdr);

	return rc;
}
422 
/* Port-level (device-global) stats ops; registered only when the LIF
 * advertises IONIC_LIF_RDMA_STAT_GLOBAL.
 */
static const struct ib_device_ops ionic_hw_stats_ops = {
	.driver_id = RDMA_DRIVER_IONIC,
	.alloc_hw_port_stats = ionic_alloc_hw_stats,
	.get_hw_stats = ionic_get_hw_stats,
};

/* Per-QP counter ops; registered only when the LIF advertises
 * IONIC_LIF_RDMA_STAT_QP.
 */
static const struct ib_device_ops ionic_counter_stats_ops = {
	.counter_alloc_stats = ionic_counter_alloc_stats,
	.counter_dealloc = ionic_counter_dealloc,
	.counter_bind_qp = ionic_counter_bind_qp,
	.counter_unbind_qp = ionic_counter_unbind_qp,
	.counter_update_stats = ionic_counter_update_stats,
};
436 
/* Register whichever stats features the LIF advertises.
 *
 * Failure to set up either feature is non-fatal: the corresponding
 * ib_device_ops are simply not registered and the device comes up
 * without that class of stats.
 */
void ionic_stats_init(struct ionic_ibdev *dev)
{
	u16 stats_type = dev->lif_cfg.stats_type;
	int rc;

	if (stats_type & IONIC_LIF_RDMA_STAT_GLOBAL) {
		rc = ionic_init_hw_stats(dev);
		if (rc)
			ibdev_dbg(&dev->ibdev, "Failed to init hw stats\n");
		else
			ib_set_device_ops(&dev->ibdev, &ionic_hw_stats_ops);
	}

	if (stats_type & IONIC_LIF_RDMA_STAT_QP) {
		dev->counter_stats = kzalloc_obj(*dev->counter_stats,
						 GFP_KERNEL);
		if (!dev->counter_stats)
			return;

		rc = ionic_alloc_counters(dev);
		if (rc) {
			ibdev_dbg(&dev->ibdev, "Failed to init counter stats\n");
			kfree(dev->counter_stats);
			dev->counter_stats = NULL;
			return;
		}

		/* init the xarray before any counter op can run */
		xa_init_flags(&dev->counter_stats->xa_counters, XA_FLAGS_ALLOC);

		ib_set_device_ops(&dev->ibdev, &ionic_counter_stats_ops);
	}
}
469 
470 void ionic_stats_cleanup(struct ionic_ibdev *dev)
471 {
472 	if (dev->counter_stats) {
473 		xa_destroy(&dev->counter_stats->xa_counters);
474 		kfree(dev->counter_stats->hdr);
475 		kfree(dev->counter_stats->stats_hdrs);
476 		kfree(dev->counter_stats);
477 		dev->counter_stats = NULL;
478 	}
479 
480 	kfree(dev->hw_stats);
481 	kfree(dev->hw_stats_buf);
482 	kfree(dev->hw_stats_hdrs);
483 }
484