xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c (revision 0ad9617c78acbc71373fb341a6f75d4012b01d69)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#include <mlx5_core.h>
#include "fs_pool.h"

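/* A pool hands out entries from bulks created through the ops callbacks. Each
 * bulk tracks its free entries with a bitmask: a set bit means the entry at
 * that index is free. mlx5_fs_bulk_init() starts with every bit set, i.e. all
 * @bulk_len entries available.
 */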
int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
		      int bulk_len)
{
	int i;

	fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
				    GFP_KERNEL);
	if (!fs_bulk->bitmask)
		return -ENOMEM;

	fs_bulk->bulk_len = bulk_len;
	for (i = 0; i < bulk_len; i++)
		set_bit(i, fs_bulk->bitmask);

	return 0;
}

void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
{
	kvfree(fs_bulk->bitmask);
}

int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk)
{
	return bitmap_weight(bulk->bitmask, bulk->bulk_len);
}

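/* Claim the first free entry in @fs_bulk: find the lowest set bit, clear it
 * and report the bulk and index through @pool_index. Returns -ENOSPC when the
 * bulk has no free entries left.
 */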
static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk,
				      struct mlx5_fs_pool_index *pool_index)
{
	int free_index;

	WARN_ON_ONCE(!pool_index || !fs_bulk);
	free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len);
	if (free_index >= fs_bulk->bulk_len)
		return -ENOSPC;

	clear_bit(free_index, fs_bulk->bitmask);
	pool_index->fs_bulk = fs_bulk;
	pool_index->index = free_index;
	return 0;
}

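/* Return entry @index to @fs_bulk. A set bit means the entry is already free,
 * so a double release is rejected with -EINVAL.
 */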
static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index)
{
	if (test_bit(index, fs_bulk->bitmask))
		return -EINVAL;

	set_bit(index, fs_bulk->bitmask);
	return 0;
}

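/* Set up an empty pool. All three bulk lists start empty and bulks are only
 * created on demand, when an acquisition cannot be satisfied from existing
 * bulks. The bulk_create, bulk_destroy and update_threshold callbacks are
 * mandatory.
 */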
void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
		       const struct mlx5_fs_pool_ops *ops, void *pool_ctx)
{
	WARN_ON_ONCE(!ops || !ops->bulk_destroy || !ops->bulk_create ||
		     !ops->update_threshold);
	pool->dev = dev;
	pool->pool_ctx = pool_ctx;
	mutex_init(&pool->pool_lock);
	INIT_LIST_HEAD(&pool->fully_used);
	INIT_LIST_HEAD(&pool->partially_used);
	INIT_LIST_HEAD(&pool->unused);
	pool->available_units = 0;
	pool->used_units = 0;
	pool->threshold = 0;
	pool->ops = ops;
}

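/* Destroy every bulk the pool still holds, no matter which list it is on.
 * Entries that are still marked as used go away together with their bulk.
 */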
void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool)
{
	struct mlx5_core_dev *dev = pool->dev;
	struct mlx5_fs_bulk *bulk;
	struct mlx5_fs_bulk *tmp;

	list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list)
		pool->ops->bulk_destroy(dev, bulk);
	list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list)
		pool->ops->bulk_destroy(dev, bulk);
	list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list)
		pool->ops->bulk_destroy(dev, bulk);
}

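/* Grow the pool by one bulk. On success the new entries are added to
 * available_units; the release threshold is re-evaluated either way.
 */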
static struct mlx5_fs_bulk *
mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool)
{
	struct mlx5_core_dev *dev = fs_pool->dev;
	struct mlx5_fs_bulk *new_bulk;

	new_bulk = fs_pool->ops->bulk_create(dev, fs_pool->pool_ctx);
	if (new_bulk)
		fs_pool->available_units += new_bulk->bulk_len;
	fs_pool->ops->update_threshold(fs_pool);
	return new_bulk;
}

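/* Shrink the pool by one bulk: drop its entries from available_units, destroy
 * it and re-evaluate the release threshold.
 */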
static void
mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk)
{
	struct mlx5_core_dev *dev = fs_pool->dev;

	fs_pool->available_units -= bulk->bulk_len;
	fs_pool->ops->bulk_destroy(dev, bulk);
	fs_pool->ops->update_threshold(fs_pool);
}

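/* Try to acquire an entry from the first bulk on @src_list. The bulk is moved
 * to @next_list once it runs out of free entries, or unconditionally when
 * @move_non_full_bulk is set (used when promoting a bulk from the unused to
 * the partially_used list).
 */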
static int
mlx5_fs_pool_acquire_from_list(struct list_head *src_list,
			       struct list_head *next_list,
			       bool move_non_full_bulk,
			       struct mlx5_fs_pool_index *pool_index)
{
	struct mlx5_fs_bulk *fs_bulk;
	int err;

	if (list_empty(src_list))
		return -ENODATA;

	fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list);
	err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index);
	if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0)
		list_move(&fs_bulk->pool_list, next_list);
	return err;
}

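/* Acquire one entry from the pool, preferring a partially used bulk, then an
 * unused one, and only then allocating a fresh bulk. Returns -ENOENT if no
 * entry is free and a new bulk cannot be created.
 */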
int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
			       struct mlx5_fs_pool_index *pool_index)
{
	struct mlx5_fs_bulk *new_bulk;
	int err;

	mutex_lock(&fs_pool->pool_lock);

	err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used,
					     &fs_pool->fully_used, false,
					     pool_index);
	if (err)
		err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused,
						     &fs_pool->partially_used,
						     true, pool_index);
	if (err) {
		new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool);
		if (!new_bulk) {
			err = -ENOENT;
			goto out;
		}
		err = mlx5_fs_bulk_acquire_index(new_bulk, pool_index);
		WARN_ON_ONCE(err);
		list_add(&new_bulk->pool_list, &fs_pool->partially_used);
	}
	fs_pool->available_units--;
	fs_pool->used_units++;

out:
	mutex_unlock(&fs_pool->pool_lock);
	return err;
}

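/* Release one entry back to the pool. A bulk that regains its first free
 * entry moves back to the partially_used list; a bulk that becomes completely
 * free is either destroyed, when the pool already has more free entries than
 * the threshold, or parked on the unused list for reuse.
 */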
int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
			       struct mlx5_fs_pool_index *pool_index)
{
	struct mlx5_fs_bulk *bulk = pool_index->fs_bulk;
	int bulk_free_amount;
	int err;

	mutex_lock(&fs_pool->pool_lock);

	/* TBD: would rather return void if there were no warn here in the original code */
	err = mlx5_fs_bulk_release_index(bulk, pool_index->index);
	if (err)
		goto unlock;

	fs_pool->available_units++;
	fs_pool->used_units--;

	bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk);
	if (bulk_free_amount == 1)
		list_move_tail(&bulk->pool_list, &fs_pool->partially_used);
	if (bulk_free_amount == bulk->bulk_len) {
		list_del(&bulk->pool_list);
		if (fs_pool->available_units > fs_pool->threshold)
			mlx5_fs_pool_free_bulk(fs_pool, bulk);
		else
			list_add(&bulk->pool_list, &fs_pool->unused);
	}

unlock:
	mutex_unlock(&fs_pool->pool_lock);
	return err;
}
196