xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c (revision 1c8910f50350b46eb184f77e5af7bc09e57e8aed)
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_pool.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10

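/* Per-device flow counter statistics context. The xarray holds aging
 * counters, which the delayed work periodically samples into their SW
 * caches using the bulk query buffer below.
 */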
struct mlx5_fc_stats {
	struct xarray counters;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long sampling_interval; /* jiffies */
	u32 *bulk_query_out;
	int bulk_query_len;
	bool bulk_query_alloc_failed;
	unsigned long next_bulk_query_alloc;
	struct mlx5_fs_pool fc_pool;
};

static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev);
static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool);
static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool);
static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc);

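/* Bulk query lengths are bounded by the device's log_max_flow_counter_bulk
 * capability: start with a small buffer and grow to the SW maximum once
 * enough counters exist (see mlx5_fc_stats_work()).
 */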
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
	return min_t(int, MLX5_INIT_COUNTERS_BULK,
		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
	return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

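/* Update a counter's SW cache from the bulk query output. 'lastuse' is only
 * refreshed when the packet count changed, so idle counters keep their last
 * activity timestamp.
 */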
static void update_counter_cache(int index, u32 *bulk_raw_data,
				 struct mlx5_fc_cache *cache)
{
	void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
			     flow_statistics[index]);
	u64 packets = MLX5_GET64(traffic_counter, stats, packets);
	u64 bytes = MLX5_GET64(traffic_counter, stats, octets);

	if (cache->packets == packets)
		return;

	cache->packets = packets;
	cache->bytes = bytes;
	cache->lastuse = jiffies;
}

/* Synchronization notes
 *
 * Access to counter array:
 * - create - mlx5_fc_create() (user context)
 *   - inserts the counter into the xarray.
 *
 * - destroy - mlx5_fc_destroy() (user context)
 *   - erases the counter from the xarray and releases it.
 *
 * - query - mlx5_fc_query(), mlx5_fc_query_cached{,_raw}() (user context)
 *   - the user must not access a counter after destroy.
 *
 * - bulk query (single-threaded workqueue context)
 *   - create: query relies on 'lastuse' to avoid updating counters added
 *             around the same time as the current bulk cmd.
 *   - destroy: destroyed counters will not be accessed, even if they are
 *              destroyed during a bulk query command.
 */
static void mlx5_fc_stats_query_all_counters(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	u32 bulk_len = fc_stats->bulk_query_len;
	XA_STATE(xas, &fc_stats->counters, 0);
	u32 *data = fc_stats->bulk_query_out;
	struct mlx5_fc *counter;
	u32 last_bulk_id = 0;
	u64 bulk_query_time;
	u32 bulk_base_id;
	int err;

	xas_lock(&xas);
	xas_for_each(&xas, counter, U32_MAX) {
		if (xas_retry(&xas, counter))
			continue;
		if (unlikely(counter->id >= last_bulk_id)) {
			/* Start new bulk query. */
			/* First id must be aligned to 4 when using bulk query. */
			bulk_base_id = counter->id & ~0x3;
			last_bulk_id = bulk_base_id + bulk_len;
			/* The lock is released while querying the hw and reacquired after. */
			xas_unlock(&xas);
			/* The same id needs to be processed again in the next loop iteration. */
			xas_reset(&xas);
			bulk_query_time = jiffies;
			err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, data);
			if (err) {
				mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
				return;
			}
			xas_lock(&xas);
			continue;
		}
		/* Do not update counters added after bulk query was started. */
		if (time_after64(bulk_query_time, counter->cache.lastuse))
			update_counter_cache(counter->id - bulk_base_id, data,
					     &counter->cache);
	}
	xas_unlock(&xas);
}

static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter->bulk);
	kfree(counter);
}

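/* Return pool-acquired counters to the pool; free single-allocated counters.
 * Local counters are owned by their creator and must never be released here.
 */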
static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;

	if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
		return;

	if (counter->type == MLX5_FC_TYPE_POOL_ACQUIRED)
		mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
	else
		mlx5_fc_free(dev, counter);
}

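/* Replace the bulk query output buffer with one sized for bulk_query_len
 * counters. On allocation failure the current buffer is kept and a one-time
 * warning is emitted.
 */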
static void mlx5_fc_stats_bulk_query_buf_realloc(struct mlx5_core_dev *dev,
						 int bulk_query_len)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	u32 *bulk_query_out_tmp;
	int out_len;

	out_len = mlx5_cmd_fc_get_bulk_query_out_len(bulk_query_len);
	bulk_query_out_tmp = kvzalloc(out_len, GFP_KERNEL);
	if (!bulk_query_out_tmp) {
		mlx5_core_warn_once(dev,
				    "Can't increase flow counters bulk query buffer size, alloc failed, bulk_query_len(%d)\n",
				    bulk_query_len);
		return;
	}

	kvfree(fc_stats->bulk_query_out);
	fc_stats->bulk_query_out = bulk_query_out_tmp;
	fc_stats->bulk_query_len = bulk_query_len;
	mlx5_core_info(dev,
		       "Flow counters bulk query buffer size increased, bulk_query_len(%d)\n",
		       bulk_query_len);
}

static int mlx5_fc_num_counters(struct mlx5_fc_stats *fc_stats)
{
	struct mlx5_fc *counter;
	int num_counters = 0;
	unsigned long id;

	xa_for_each(&fc_stats->counters, id, counter)
		num_counters++;
	return num_counters;
}

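/* Periodic stats work: requeues itself, grows the bulk query buffer to the
 * maximum once more counters exist than the initial buffer can hold, then
 * samples all aging counters.
 */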
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_fc_stats *fc_stats = container_of(work, struct mlx5_fc_stats,
						      work.work);
	struct mlx5_core_dev *dev = fc_stats->fc_pool.dev;

	queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval);

	/* Grow the bulk query buffer to max if not maxed and enough counters are present. */
	if (unlikely(fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
		     mlx5_fc_num_counters(fc_stats) > get_init_bulk_query_len(dev)))
		mlx5_fc_stats_bulk_query_buf_realloc(dev, get_max_bulk_query_len(dev));

	mlx5_fc_stats_query_all_counters(dev);
}

static void mlx5_fc_bulk_init(struct mlx5_fc_bulk *fc_bulk, u32 base_id)
{
	fc_bulk->base_id = base_id;
	refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
	mutex_init(&fc_bulk->hws_data.lock);
}

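/* Allocate a standalone HW counter wrapped in a single-entry bulk, used as
 * the fallback when a counter cannot be taken from the pool.
 */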
static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_bulk *fc_bulk;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
	if (!fc_bulk) {
		err = -ENOMEM;
		goto free_counter;
	}
	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto free_bulk;

	counter->type = MLX5_FC_TYPE_SINGLE;
	mlx5_fs_bulk_init(&fc_bulk->fs_bulk, 1);
	mlx5_fc_bulk_init(fc_bulk, counter->id);
	counter->bulk = fc_bulk;
	return counter;

free_bulk:
	kfree(fc_bulk);
free_counter:
	kfree(counter);
	return ERR_PTR(err);
}

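/* Aging counters are taken from the pool when the device supports bulk
 * allocation; otherwise (or on pool failure) fall back to a single counter.
 */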
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	struct mlx5_fc *counter;

	if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
		counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
		if (!IS_ERR(counter))
			return counter;
	}

	return mlx5_fc_single_alloc(dev);
}

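/* Create a flow counter. Aging counters are inserted into the xarray so the
 * periodic stats work samples them.
 */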
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	int err;

	if (IS_ERR(counter))
		return counter;

	counter->aging = aging;

	if (aging) {
		u32 id = counter->id;

		counter->cache.lastuse = jiffies;
		counter->lastbytes = counter->cache.bytes;
		counter->lastpackets = counter->cache.packets;

		err = xa_err(xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));
		if (err != 0)
			goto err_out_alloc;
	}

	return counter;

err_out_alloc:
	mlx5_fc_release(dev, counter);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);

u32 mlx5_fc_id(struct mlx5_fc *counter)
{
	return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);

void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging)
		xa_erase(&fc_stats->counters, counter->id);
	mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats;

	fc_stats = kzalloc(sizeof(*fc_stats), GFP_KERNEL);
	if (!fc_stats)
		return -ENOMEM;
	dev->priv.fc_stats = fc_stats;

	xa_init(&fc_stats->counters);

	/* Allocate initial (small) bulk query buffer. */
	mlx5_fc_stats_bulk_query_buf_realloc(dev, get_init_bulk_query_len(dev));
	if (!fc_stats->bulk_query_out)
		goto err_bulk;

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		goto err_wq_create;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
	queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
	return 0;

err_wq_create:
	kvfree(fc_stats->bulk_query_out);
err_bulk:
	kfree(fc_stats);
	return -ENOMEM;
}

void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	struct mlx5_fc *counter;
	unsigned long id;

	cancel_delayed_work_sync(&fc_stats->work);
	destroy_workqueue(fc_stats->wq);
	fc_stats->wq = NULL;

	xa_for_each(&fc_stats->counters, id, counter) {
		xa_erase(&fc_stats->counters, id);
		mlx5_fc_release(dev, counter);
	}
	xa_destroy(&fc_stats->counters);

	mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
	kvfree(fc_stats->bulk_query_out);
	kfree(fc_stats);
}

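/* Query the counter directly from firmware, bypassing the cached values. */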
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes)
{
	return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
{
	return counter->cache.lastuse;
}

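/* Return bytes/packets deltas since the previous call and update the
 * last-read snapshot; mlx5_fc_query_cached_raw() below returns the absolute
 * cached values instead.
 */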
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}

void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
			      u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c = counter->cache;

	*bytes = c.bytes;
	*packets = c.packets;
	*lastuse = c.lastuse;
}

void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;

	queue_delayed_work(fc_stats->wq, dwork, delay);
}

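/* The sampling interval can only be shortened here, never lengthened. */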
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;

	fc_stats->sampling_interval = min_t(unsigned long, interval,
					    fc_stats->sampling_interval);
}

/* Flow counter bulks */

static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
			 u32 id)
{
	counter->bulk = bulk;
	counter->id = id;
}

u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
{
	return counter->bulk->base_id;
}

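/* Pool op: allocate a HW bulk of counters, sized by the device's
 * flow_counter_bulk_alloc capability, and initialize each counter in it.
 */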
static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
						void *pool_ctx)
{
	enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
	struct mlx5_fc_bulk *fc_bulk;
	int bulk_len;
	u32 base_id;
	int i;

	alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
	bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;

	fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL);
	if (!fc_bulk)
		return NULL;

	mlx5_fs_bulk_init(&fc_bulk->fs_bulk, bulk_len);

	if (mlx5_fs_bulk_bitmap_alloc(dev, &fc_bulk->fs_bulk))
		goto fc_bulk_free;

	if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
		goto fs_bulk_cleanup;

	mlx5_fc_bulk_init(fc_bulk, base_id);
	for (i = 0; i < bulk_len; i++)
		mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);

	return &fc_bulk->fs_bulk;

fs_bulk_cleanup:
	mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk);
fc_bulk_free:
	kvfree(fc_bulk);
	return NULL;
}

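/* Pool op: free a HW bulk. Refuses to free while any of its counters are
 * still acquired.
 */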
static int
mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
{
	struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk,
						    struct mlx5_fc_bulk,
						    fs_bulk);

	if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
		mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
		return -EBUSY;
	}

	mlx5_cmd_fc_free(dev, fc_bulk->base_id);
	mlx5_fs_bulk_cleanup(fs_bulk);
	kvfree(fc_bulk);

	return 0;
}

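/* Pool op: the threshold scales with the number of used counters
 * (used_units / MLX5_FC_POOL_USED_BUFF_RATIO), capped at
 * MLX5_FC_POOL_MAX_THRESHOLD.
 */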
static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool)
{
	fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
				   fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO);
}

/* Flow counters pool API */

static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = {
	.bulk_destroy = mlx5_fc_bulk_destroy,
	.bulk_create = mlx5_fc_bulk_create,
	.update_threshold = mlx5_fc_pool_update_threshold,
};

static void
mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev)
{
	mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops, NULL);
}

static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool)
{
	mlx5_fs_pool_cleanup(fc_pool);
}

static struct mlx5_fc *
mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool)
{
	struct mlx5_fs_pool_index pool_index = {};
	struct mlx5_fc_bulk *fc_bulk;
	int err;

	err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index);
	if (err)
		return ERR_PTR(err);
	fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk);
	return &fc_bulk->fcs[pool_index.index];
}

static void
mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc)
{
	struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;
	struct mlx5_fs_pool_index pool_index = {};
	struct mlx5_core_dev *dev = fc_pool->dev;

	pool_index.fs_bulk = fs_bulk;
	pool_index.index = fc->id - fc->bulk->base_id;
	if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
		mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
}

/**
 * mlx5_fc_local_create - Allocate an mlx5_fc struct for a counter that
 * was already acquired, using its counter id and bulk data.
 *
 * @counter_id: id of the already-acquired counter
 * @offset: counter offset from the bulk base id
 * @bulk_size: size of the bulk the counter was allocated from
 *
 * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise.
 */
struct mlx5_fc *
mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
{
	struct mlx5_fc_bulk *fc_bulk;
	struct mlx5_fc *counter;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);
	fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
	if (!fc_bulk) {
		kfree(counter);
		return ERR_PTR(-ENOMEM);
	}

	counter->type = MLX5_FC_TYPE_LOCAL;
	counter->id = counter_id;
	mlx5_fs_bulk_init(&fc_bulk->fs_bulk, bulk_size);
	mlx5_fc_bulk_init(fc_bulk, counter_id - offset);
	counter->bulk = fc_bulk;
	refcount_set(&counter->fc_local_refcount, 1);
	return counter;
}
EXPORT_SYMBOL(mlx5_fc_local_create);

void mlx5_fc_local_destroy(struct mlx5_fc *counter)
{
	kfree(counter->bulk);
	kfree(counter);
}
EXPORT_SYMBOL(mlx5_fc_local_destroy);

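/* Reference counting for local counters: the last mlx5_fc_local_put() frees
 * the counter. Both helpers are no-ops for non-local counters.
 */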
void mlx5_fc_local_get(struct mlx5_fc *counter)
{
	if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
		return;

	refcount_inc(&counter->fc_local_refcount);
}

void mlx5_fc_local_put(struct mlx5_fc *counter)
{
	if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
		return;

	if (!refcount_dec_and_test(&counter->fc_local_refcount))
		return;

	mlx5_fc_local_destroy(counter);
}
619