xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include <linux/refcount.h>
5 #include <linux/idr.h>
6 
7 #include "spectrum.h"
8 #include "reg.h"
9 
/* Per-ASIC PGT (Port Group Table) state. MID (multicast ID) indexes are
 * tracked in an IDR keyed by MID; a slot holds NULL while the MID is merely
 * reserved and a struct mlxsw_sp_pgt_entry once ports are added to it.
 */
struct mlxsw_sp_pgt {
	struct idr pgt_idr;	/* MID -> entry (or NULL if only reserved). */
	u16 end_index; /* Exclusive. */
	struct mutex lock; /* Protects PGT. */
	bool smpe_index_valid;	/* Whether SMPE index is written to SMID2. */
};
16 
/* A single PGT entry: the set of local ports that are members of one MID. */
struct mlxsw_sp_pgt_entry {
	struct list_head ports_list;	/* List of mlxsw_sp_pgt_entry_port. */
	u16 index;	/* MID index in the PGT. */
	u16 smpe_index;	/* Switch multicast port-to-egress-VID index. */
};
22 
/* One member port of a PGT entry. */
struct mlxsw_sp_pgt_entry_port {
	struct list_head list; /* Member of 'ports_list'. */
	u16 local_port;
};
27 
28 int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid)
29 {
30 	int index, err = 0;
31 
32 	mutex_lock(&mlxsw_sp->pgt->lock);
33 	index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
34 			  mlxsw_sp->pgt->end_index, GFP_KERNEL);
35 
36 	if (index < 0) {
37 		err = index;
38 		goto err_idr_alloc;
39 	}
40 
41 	*p_mid = index;
42 	mutex_unlock(&mlxsw_sp->pgt->lock);
43 	return 0;
44 
45 err_idr_alloc:
46 	mutex_unlock(&mlxsw_sp->pgt->lock);
47 	return err;
48 }
49 
50 void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
51 {
52 	mutex_lock(&mlxsw_sp->pgt->lock);
53 	WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
54 	mutex_unlock(&mlxsw_sp->pgt->lock);
55 }
56 
/* Reserve a contiguous range of 'count' MID indexes starting at the current
 * IDR cursor. On success, the base index is stored in 'p_mid_base' and zero
 * is returned; on failure the partially allocated range is unwound and a
 * negative errno is returned.
 */
int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 *p_mid_base,
				 u16 count)
{
	unsigned int mid_base;
	int i, err;

	mutex_lock(&mlxsw_sp->pgt->lock);

	/* NOTE(review): this relies on every slot in
	 * [mid_base, mid_base + count) being free, so that each cyclic
	 * allocation lands exactly at mid_base + i. Both the success path and
	 * the unwind below index the range by offset rather than by the
	 * returned index - confirm callers cannot fragment this window.
	 */
	mid_base = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
	for (i = 0; i < count; i++) {
		err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
				       mid_base, mid_base + count, GFP_KERNEL);
		if (err < 0)
			goto err_idr_alloc_cyclic;
	}

	mutex_unlock(&mlxsw_sp->pgt->lock);
	*p_mid_base = mid_base;
	return 0;

err_idr_alloc_cyclic:
	/* Unwind the 'i' allocations that succeeded before the failure. */
	for (i--; i >= 0; i--)
		idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
	mutex_unlock(&mlxsw_sp->pgt->lock);
	return err;
}
83 
84 void
85 mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
86 {
87 	struct idr *pgt_idr = &mlxsw_sp->pgt->pgt_idr;
88 	int i;
89 
90 	mutex_lock(&mlxsw_sp->pgt->lock);
91 
92 	for (i = 0; i < count; i++)
93 		WARN_ON_ONCE(idr_remove(pgt_idr, mid_base + i));
94 
95 	mutex_unlock(&mlxsw_sp->pgt->lock);
96 }
97 
98 static struct mlxsw_sp_pgt_entry_port *
99 mlxsw_sp_pgt_entry_port_lookup(struct mlxsw_sp_pgt_entry *pgt_entry,
100 			       u16 local_port)
101 {
102 	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
103 
104 	list_for_each_entry(pgt_entry_port, &pgt_entry->ports_list, list) {
105 		if (pgt_entry_port->local_port == local_port)
106 			return pgt_entry_port;
107 	}
108 
109 	return NULL;
110 }
111 
112 static struct mlxsw_sp_pgt_entry *
113 mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
114 {
115 	struct mlxsw_sp_pgt_entry *pgt_entry;
116 	void *ret;
117 	int err;
118 
119 	pgt_entry = kzalloc(sizeof(*pgt_entry), GFP_KERNEL);
120 	if (!pgt_entry)
121 		return ERR_PTR(-ENOMEM);
122 
123 	ret = idr_replace(&pgt->pgt_idr, pgt_entry, mid);
124 	if (IS_ERR(ret)) {
125 		err = PTR_ERR(ret);
126 		goto err_idr_replace;
127 	}
128 
129 	INIT_LIST_HEAD(&pgt_entry->ports_list);
130 	pgt_entry->index = mid;
131 	pgt_entry->smpe_index = smpe;
132 	return pgt_entry;
133 
134 err_idr_replace:
135 	kfree(pgt_entry);
136 	return ERR_PTR(err);
137 }
138 
139 static void mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt,
140 				       struct mlxsw_sp_pgt_entry *pgt_entry)
141 {
142 	WARN_ON(!list_empty(&pgt_entry->ports_list));
143 
144 	pgt_entry = idr_replace(&pgt->pgt_idr, NULL, pgt_entry->index);
145 	if (WARN_ON(IS_ERR(pgt_entry)))
146 		return;
147 
148 	kfree(pgt_entry);
149 }
150 
151 static struct mlxsw_sp_pgt_entry *
152 mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
153 {
154 	struct mlxsw_sp_pgt_entry *pgt_entry;
155 
156 	pgt_entry = idr_find(&pgt->pgt_idr, mid);
157 	if (pgt_entry)
158 		return pgt_entry;
159 
160 	return mlxsw_sp_pgt_entry_create(pgt, mid, smpe);
161 }
162 
163 static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
164 {
165 	struct mlxsw_sp_pgt_entry *pgt_entry;
166 
167 	pgt_entry = idr_find(&pgt->pgt_idr, mid);
168 	if (WARN_ON(!pgt_entry))
169 		return;
170 
171 	if (list_empty(&pgt_entry->ports_list))
172 		mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
173 }
174 
/* Set the membership state of 'local_port' in a SMID2 register payload and
 * mark the port's bit in the mask so the device applies only this change.
 */
static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
					bool member)
{
	mlxsw_reg_smid2_port_set(smid2_pl, local_port, member);
	mlxsw_reg_smid2_port_mask_set(smid2_pl, local_port, 1);
}
181 
182 static int
183 mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
184 			      const struct mlxsw_sp_pgt_entry *pgt_entry,
185 			      u16 local_port, bool member)
186 {
187 	char *smid2_pl;
188 	int err;
189 
190 	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
191 	if (!smid2_pl)
192 		return -ENOMEM;
193 
194 	mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0,
195 			     mlxsw_sp->pgt->smpe_index_valid,
196 			     pgt_entry->smpe_index);
197 
198 	mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
199 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
200 
201 	kfree(smid2_pl);
202 
203 	return err;
204 }
205 
206 static struct mlxsw_sp_pgt_entry_port *
207 mlxsw_sp_pgt_entry_port_create(struct mlxsw_sp *mlxsw_sp,
208 			       struct mlxsw_sp_pgt_entry *pgt_entry,
209 			       u16 local_port)
210 {
211 	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
212 	int err;
213 
214 	pgt_entry_port = kzalloc(sizeof(*pgt_entry_port), GFP_KERNEL);
215 	if (!pgt_entry_port)
216 		return ERR_PTR(-ENOMEM);
217 
218 	err = mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry, local_port,
219 					    true);
220 	if (err)
221 		goto err_pgt_entry_port_write;
222 
223 	pgt_entry_port->local_port = local_port;
224 	list_add(&pgt_entry_port->list, &pgt_entry->ports_list);
225 
226 	return pgt_entry_port;
227 
228 err_pgt_entry_port_write:
229 	kfree(pgt_entry_port);
230 	return ERR_PTR(err);
231 }
232 
233 static void
234 mlxsw_sp_pgt_entry_port_destroy(struct mlxsw_sp *mlxsw_sp,
235 				struct mlxsw_sp_pgt_entry *pgt_entry,
236 				struct mlxsw_sp_pgt_entry_port *pgt_entry_port)
237 
238 {
239 	list_del(&pgt_entry_port->list);
240 	mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry,
241 				      pgt_entry_port->local_port, false);
242 	kfree(pgt_entry_port);
243 }
244 
245 static int mlxsw_sp_pgt_entry_port_add(struct mlxsw_sp *mlxsw_sp, u16 mid,
246 				       u16 smpe, u16 local_port)
247 {
248 	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
249 	struct mlxsw_sp_pgt_entry *pgt_entry;
250 	int err;
251 
252 	mutex_lock(&mlxsw_sp->pgt->lock);
253 
254 	pgt_entry = mlxsw_sp_pgt_entry_get(mlxsw_sp->pgt, mid, smpe);
255 	if (IS_ERR(pgt_entry)) {
256 		err = PTR_ERR(pgt_entry);
257 		goto err_pgt_entry_get;
258 	}
259 
260 	pgt_entry_port = mlxsw_sp_pgt_entry_port_create(mlxsw_sp, pgt_entry,
261 							local_port);
262 	if (IS_ERR(pgt_entry_port)) {
263 		err = PTR_ERR(pgt_entry_port);
264 		goto err_pgt_entry_port_get;
265 	}
266 
267 	mutex_unlock(&mlxsw_sp->pgt->lock);
268 	return 0;
269 
270 err_pgt_entry_port_get:
271 	mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
272 err_pgt_entry_get:
273 	mutex_unlock(&mlxsw_sp->pgt->lock);
274 	return err;
275 }
276 
277 static void mlxsw_sp_pgt_entry_port_del(struct mlxsw_sp *mlxsw_sp,
278 					u16 mid, u16 smpe, u16 local_port)
279 {
280 	struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
281 	struct mlxsw_sp_pgt_entry *pgt_entry;
282 
283 	mutex_lock(&mlxsw_sp->pgt->lock);
284 
285 	pgt_entry = idr_find(&mlxsw_sp->pgt->pgt_idr, mid);
286 	if (!pgt_entry)
287 		goto out;
288 
289 	pgt_entry_port = mlxsw_sp_pgt_entry_port_lookup(pgt_entry, local_port);
290 	if (!pgt_entry_port)
291 		goto out;
292 
293 	mlxsw_sp_pgt_entry_port_destroy(mlxsw_sp, pgt_entry, pgt_entry_port);
294 	mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
295 
296 out:
297 	mutex_unlock(&mlxsw_sp->pgt->lock);
298 }
299 
300 int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
301 				u16 smpe, u16 local_port, bool member)
302 {
303 	if (member)
304 		return mlxsw_sp_pgt_entry_port_add(mlxsw_sp, mid, smpe,
305 						   local_port);
306 
307 	mlxsw_sp_pgt_entry_port_del(mlxsw_sp, mid, smpe, local_port);
308 	return 0;
309 }
310 
311 int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp)
312 {
313 	struct mlxsw_sp_pgt *pgt;
314 
315 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, PGT_SIZE))
316 		return -EIO;
317 
318 	pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);
319 	if (!pgt)
320 		return -ENOMEM;
321 
322 	idr_init(&pgt->pgt_idr);
323 	pgt->end_index = MLXSW_CORE_RES_GET(mlxsw_sp->core, PGT_SIZE);
324 	mutex_init(&pgt->lock);
325 	pgt->smpe_index_valid = mlxsw_sp->pgt_smpe_index_valid;
326 	mlxsw_sp->pgt = pgt;
327 	return 0;
328 }
329 
330 void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp)
331 {
332 	mutex_destroy(&mlxsw_sp->pgt->lock);
333 	WARN_ON(!idr_is_empty(&mlxsw_sp->pgt->pgt_idr));
334 	idr_destroy(&mlxsw_sp->pgt->pgt_idr);
335 	kfree(mlxsw_sp->pgt);
336 }
337