xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/err.h>
5 #include <linux/gfp.h>
6 #include <linux/kernel.h>
7 #include <linux/list.h>
8 #include <linux/netlink.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/slab.h>
11 #include <net/inet_ecn.h>
12 #include <net/ipv6.h>
13 
14 #include "reg.h"
15 #include "spectrum.h"
16 #include "spectrum_nve.h"
17 
/* Spectrum-1 NVE operations, indexed by enum mlxsw_sp_nve_type. */
const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[] = {
	[MLXSW_SP_NVE_TYPE_VXLAN]	= &mlxsw_sp1_nve_vxlan_ops,
};
21 
/* Spectrum-2 (and later) NVE operations, indexed by enum mlxsw_sp_nve_type. */
const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[] = {
	[MLXSW_SP_NVE_TYPE_VXLAN]	= &mlxsw_sp2_nve_vxlan_ops,
};
25 
26 struct mlxsw_sp_nve_mc_entry;
27 struct mlxsw_sp_nve_mc_record;
28 struct mlxsw_sp_nve_mc_list;
29 
/* Per-protocol (IPv4 / IPv6) operations used to manipulate the entries of
 * a multicast record that is committed to the device via the TNUMT
 * register.
 */
struct mlxsw_sp_nve_mc_record_ops {
	enum mlxsw_reg_tnumt_record_type type;
	/* Store @addr in @mc_entry; IPv6 also reserves a KVDL entry. */
	int (*entry_add)(struct mlxsw_sp_nve_mc_record *mc_record,
			 struct mlxsw_sp_nve_mc_entry *mc_entry,
			 const union mlxsw_sp_l3addr *addr);
	/* Release whatever entry_add() reserved. */
	void (*entry_del)(const struct mlxsw_sp_nve_mc_record *mc_record,
			  const struct mlxsw_sp_nve_mc_entry *mc_entry);
	/* Encode the entry at @entry_index into the TNUMT payload. */
	void (*entry_set)(const struct mlxsw_sp_nve_mc_record *mc_record,
			  const struct mlxsw_sp_nve_mc_entry *mc_entry,
			  char *tnumt_pl, unsigned int entry_index);
	/* Return true if @mc_entry holds @addr. */
	bool (*entry_compare)(const struct mlxsw_sp_nve_mc_record *mc_record,
			      const struct mlxsw_sp_nve_mc_entry *mc_entry,
			      const union mlxsw_sp_l3addr *addr);
};
44 
/* Hash table key: one multicast list exists per FID. */
struct mlxsw_sp_nve_mc_list_key {
	u16 fid_index;
};
48 
struct mlxsw_sp_nve_mc_ipv6_entry {
	struct in6_addr addr6;
	/* KVDL index at which the IPv6 address is stored for the device. */
	u32 addr6_kvdl_index;
};
53 
/* One underlay destination IP inside a multicast record. */
struct mlxsw_sp_nve_mc_entry {
	union {
		__be32 addr4;
		struct mlxsw_sp_nve_mc_ipv6_entry ipv6_entry;
	};
	u8 valid:1;	/* Set while the entry holds an address. */
};
61 
/* A fixed-capacity chunk of multicast entries. Records of one FID are
 * chained: each record's TNUMT entry points at the next record's
 * kvdl_index (see mlxsw_sp_nve_mc_record_refresh()).
 */
struct mlxsw_sp_nve_mc_record {
	struct list_head list;		/* Member of mc_list->records_list. */
	enum mlxsw_sp_l3proto proto;
	unsigned int num_entries;	/* Count of valid entries[]. */
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_nve_mc_list *mc_list;
	const struct mlxsw_sp_nve_mc_record_ops *ops;
	u32 kvdl_index;			/* KVDL address of this record. */
	struct mlxsw_sp_nve_mc_entry entries[];
};
72 
/* Per-FID list of multicast records, hashed by FID index. */
struct mlxsw_sp_nve_mc_list {
	struct list_head records_list;
	struct rhash_head ht_node;
	struct mlxsw_sp_nve_mc_list_key key;
};
78 
/* rhashtable layout for nve->mc_list_ht (keyed by FID index). */
static const struct rhashtable_params mlxsw_sp_nve_mc_list_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_nve_mc_list_key),
	.key_offset = offsetof(struct mlxsw_sp_nve_mc_list, key),
	.head_offset = offsetof(struct mlxsw_sp_nve_mc_list, ht_node),
};
84 
85 static int
mlxsw_sp_nve_mc_record_ipv4_entry_add(struct mlxsw_sp_nve_mc_record * mc_record,struct mlxsw_sp_nve_mc_entry * mc_entry,const union mlxsw_sp_l3addr * addr)86 mlxsw_sp_nve_mc_record_ipv4_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
87 				      struct mlxsw_sp_nve_mc_entry *mc_entry,
88 				      const union mlxsw_sp_l3addr *addr)
89 {
90 	mc_entry->addr4 = addr->addr4;
91 
92 	return 0;
93 }
94 
/* IPv4 entries hold no device resources; nothing to release. */
static void
mlxsw_sp_nve_mc_record_ipv4_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
				      const struct mlxsw_sp_nve_mc_entry *mc_entry)
{
}
100 
101 static void
mlxsw_sp_nve_mc_record_ipv4_entry_set(const struct mlxsw_sp_nve_mc_record * mc_record,const struct mlxsw_sp_nve_mc_entry * mc_entry,char * tnumt_pl,unsigned int entry_index)102 mlxsw_sp_nve_mc_record_ipv4_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
103 				      const struct mlxsw_sp_nve_mc_entry *mc_entry,
104 				      char *tnumt_pl, unsigned int entry_index)
105 {
106 	u32 udip = be32_to_cpu(mc_entry->addr4);
107 
108 	mlxsw_reg_tnumt_udip_set(tnumt_pl, entry_index, udip);
109 }
110 
111 static bool
mlxsw_sp_nve_mc_record_ipv4_entry_compare(const struct mlxsw_sp_nve_mc_record * mc_record,const struct mlxsw_sp_nve_mc_entry * mc_entry,const union mlxsw_sp_l3addr * addr)112 mlxsw_sp_nve_mc_record_ipv4_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
113 					  const struct mlxsw_sp_nve_mc_entry *mc_entry,
114 					  const union mlxsw_sp_l3addr *addr)
115 {
116 	return mc_entry->addr4 == addr->addr4;
117 }
118 
/* IPv4 record operations: addresses are stored inline in the record. */
static const struct mlxsw_sp_nve_mc_record_ops
mlxsw_sp_nve_mc_record_ipv4_ops = {
	.type		= MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
	.entry_add	= &mlxsw_sp_nve_mc_record_ipv4_entry_add,
	.entry_del	= &mlxsw_sp_nve_mc_record_ipv4_entry_del,
	.entry_set	= &mlxsw_sp_nve_mc_record_ipv4_entry_set,
	.entry_compare	= &mlxsw_sp_nve_mc_record_ipv4_entry_compare,
};
127 
/* IPv6: the address must be placed in KVDL so the device can reference it
 * by index; on success the entry caches both the address and its index.
 */
static int
mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
				      struct mlxsw_sp_nve_mc_entry *mc_entry,
				      const union mlxsw_sp_l3addr *addr)
{
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_ipv6_addr_kvdl_index_get(mc_record->mlxsw_sp,
						&addr->addr6, &kvdl_index);
	if (err)
		return err;

	mc_entry->ipv6_entry.addr6 = addr->addr6;
	mc_entry->ipv6_entry.addr6_kvdl_index = kvdl_index;
	return 0;
}
145 
/* Release the KVDL-stored IPv6 address obtained in entry_add(). */
static void
mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
				      const struct mlxsw_sp_nve_mc_entry *mc_entry)
{
	mlxsw_sp_ipv6_addr_put(mc_record->mlxsw_sp,
			       &mc_entry->ipv6_entry.addr6);
}
153 
154 static void
mlxsw_sp_nve_mc_record_ipv6_entry_set(const struct mlxsw_sp_nve_mc_record * mc_record,const struct mlxsw_sp_nve_mc_entry * mc_entry,char * tnumt_pl,unsigned int entry_index)155 mlxsw_sp_nve_mc_record_ipv6_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
156 				      const struct mlxsw_sp_nve_mc_entry *mc_entry,
157 				      char *tnumt_pl, unsigned int entry_index)
158 {
159 	u32 udip_ptr = mc_entry->ipv6_entry.addr6_kvdl_index;
160 
161 	mlxsw_reg_tnumt_udip_ptr_set(tnumt_pl, entry_index, udip_ptr);
162 }
163 
164 static bool
mlxsw_sp_nve_mc_record_ipv6_entry_compare(const struct mlxsw_sp_nve_mc_record * mc_record,const struct mlxsw_sp_nve_mc_entry * mc_entry,const union mlxsw_sp_l3addr * addr)165 mlxsw_sp_nve_mc_record_ipv6_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
166 					  const struct mlxsw_sp_nve_mc_entry *mc_entry,
167 					  const union mlxsw_sp_l3addr *addr)
168 {
169 	return ipv6_addr_equal(&mc_entry->ipv6_entry.addr6, &addr->addr6);
170 }
171 
/* IPv6 record operations: addresses are referenced via KVDL indices. */
static const struct mlxsw_sp_nve_mc_record_ops
mlxsw_sp_nve_mc_record_ipv6_ops = {
	.type		= MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
	.entry_add	= &mlxsw_sp_nve_mc_record_ipv6_entry_add,
	.entry_del	= &mlxsw_sp_nve_mc_record_ipv6_entry_del,
	.entry_set	= &mlxsw_sp_nve_mc_record_ipv6_entry_set,
	.entry_compare	= &mlxsw_sp_nve_mc_record_ipv6_entry_compare,
};
180 
/* Record operations selected by L3 protocol of the underlay address. */
static const struct mlxsw_sp_nve_mc_record_ops *
mlxsw_sp_nve_mc_record_ops_arr[] = {
	[MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_nve_mc_record_ipv4_ops,
	[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops,
};
186 
/* Convert an underlay IP learned by the device into a generic L3 address.
 * Only IPv4 can be carried in the 32-bit @uip; anything else is a bug.
 */
int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
				    enum mlxsw_sp_l3proto proto,
				    union mlxsw_sp_l3addr *addr)
{
	if (proto != MLXSW_SP_L3_PROTO_IPV4) {
		WARN_ON(1);
		return -EINVAL;
	}

	addr->addr4 = cpu_to_be32(uip);
	return 0;
}
200 
201 static struct mlxsw_sp_nve_mc_list *
mlxsw_sp_nve_mc_list_find(struct mlxsw_sp * mlxsw_sp,const struct mlxsw_sp_nve_mc_list_key * key)202 mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp,
203 			  const struct mlxsw_sp_nve_mc_list_key *key)
204 {
205 	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
206 
207 	return rhashtable_lookup_fast(&nve->mc_list_ht, key,
208 				      mlxsw_sp_nve_mc_list_ht_params);
209 }
210 
/* Allocate a multicast list for @key and insert it into the NVE hash
 * table. Returns the new list or an ERR_PTR() on failure.
 */
static struct mlxsw_sp_nve_mc_list *
mlxsw_sp_nve_mc_list_create(struct mlxsw_sp *mlxsw_sp,
			    const struct mlxsw_sp_nve_mc_list_key *key)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
	struct mlxsw_sp_nve_mc_list *mc_list;
	int err;

	mc_list = kmalloc_obj(*mc_list);
	if (!mc_list)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&mc_list->records_list);
	mc_list->key = *key;

	err = rhashtable_insert_fast(&nve->mc_list_ht, &mc_list->ht_node,
				     mlxsw_sp_nve_mc_list_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return mc_list;

err_rhashtable_insert:
	kfree(mc_list);
	return ERR_PTR(err);
}
237 
/* Unhash and free @mc_list; it is expected to be empty by now. */
static void mlxsw_sp_nve_mc_list_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nve_mc_list *mc_list)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;

	rhashtable_remove_fast(&nve->mc_list_ht, &mc_list->ht_node,
			       mlxsw_sp_nve_mc_list_ht_params);
	WARN_ON(!list_empty(&mc_list->records_list));
	kfree(mc_list);
}
248 
/* Find the multicast list for @key, creating it if it does not exist. */
static struct mlxsw_sp_nve_mc_list *
mlxsw_sp_nve_mc_list_get(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_nve_mc_list_key *key)
{
	struct mlxsw_sp_nve_mc_list *mc_list;

	mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, key);
	return mc_list ? mc_list : mlxsw_sp_nve_mc_list_create(mlxsw_sp, key);
}
261 
262 static void
mlxsw_sp_nve_mc_list_put(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_nve_mc_list * mc_list)263 mlxsw_sp_nve_mc_list_put(struct mlxsw_sp *mlxsw_sp,
264 			 struct mlxsw_sp_nve_mc_list *mc_list)
265 {
266 	if (!list_empty(&mc_list->records_list))
267 		return;
268 	mlxsw_sp_nve_mc_list_destroy(mlxsw_sp, mc_list);
269 }
270 
/* Allocate a record for @proto with room for the per-protocol maximum of
 * entries, reserve its TNUMT KVDL entry and link it at the tail of
 * @mc_list. Returns the record or an ERR_PTR() on failure.
 */
static struct mlxsw_sp_nve_mc_record *
mlxsw_sp_nve_mc_record_create(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nve_mc_list *mc_list,
			      enum mlxsw_sp_l3proto proto)
{
	unsigned int num_max_entries = mlxsw_sp->nve->num_max_mc_entries[proto];
	struct mlxsw_sp_nve_mc_record *mc_record;
	int err;

	mc_record = kzalloc_flex(*mc_record, entries, num_max_entries);
	if (!mc_record)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
				  &mc_record->kvdl_index);
	if (err)
		goto err_kvdl_alloc;

	mc_record->ops = mlxsw_sp_nve_mc_record_ops_arr[proto];
	mc_record->mlxsw_sp = mlxsw_sp;
	mc_record->mc_list = mc_list;
	mc_record->proto = proto;
	list_add_tail(&mc_record->list, &mc_list->records_list);

	return mc_record;

err_kvdl_alloc:
	kfree(mc_record);
	return ERR_PTR(err);
}
301 
/* Unlink @mc_record, free its TNUMT KVDL entry and release it. The record
 * is expected to have no valid entries left.
 */
static void
mlxsw_sp_nve_mc_record_destroy(struct mlxsw_sp_nve_mc_record *mc_record)
{
	struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;

	list_del(&mc_record->list);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
			   mc_record->kvdl_index);
	WARN_ON(mc_record->num_entries);
	kfree(mc_record);
}
313 
314 static struct mlxsw_sp_nve_mc_record *
mlxsw_sp_nve_mc_record_get(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_nve_mc_list * mc_list,enum mlxsw_sp_l3proto proto)315 mlxsw_sp_nve_mc_record_get(struct mlxsw_sp *mlxsw_sp,
316 			   struct mlxsw_sp_nve_mc_list *mc_list,
317 			   enum mlxsw_sp_l3proto proto)
318 {
319 	struct mlxsw_sp_nve_mc_record *mc_record;
320 
321 	list_for_each_entry_reverse(mc_record, &mc_list->records_list, list) {
322 		unsigned int num_entries = mc_record->num_entries;
323 		struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
324 
325 		if (mc_record->proto == proto &&
326 		    num_entries < nve->num_max_mc_entries[proto])
327 			return mc_record;
328 	}
329 
330 	return mlxsw_sp_nve_mc_record_create(mlxsw_sp, mc_list, proto);
331 }
332 
333 static void
mlxsw_sp_nve_mc_record_put(struct mlxsw_sp_nve_mc_record * mc_record)334 mlxsw_sp_nve_mc_record_put(struct mlxsw_sp_nve_mc_record *mc_record)
335 {
336 	if (mc_record->num_entries != 0)
337 		return;
338 
339 	mlxsw_sp_nve_mc_record_destroy(mc_record);
340 }
341 
342 static struct mlxsw_sp_nve_mc_entry *
mlxsw_sp_nve_mc_free_entry_find(struct mlxsw_sp_nve_mc_record * mc_record)343 mlxsw_sp_nve_mc_free_entry_find(struct mlxsw_sp_nve_mc_record *mc_record)
344 {
345 	struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
346 	unsigned int num_max_entries;
347 	int i;
348 
349 	num_max_entries = nve->num_max_mc_entries[mc_record->proto];
350 	for (i = 0; i < num_max_entries; i++) {
351 		if (mc_record->entries[i].valid)
352 			continue;
353 		return &mc_record->entries[i];
354 	}
355 
356 	return NULL;
357 }
358 
/* Commit @mc_record to the device via the TNUMT register: point it at the
 * next record in the list (if any) and re-encode every currently valid
 * entry. Returns the register write's status.
 */
static int
mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record)
{
	enum mlxsw_reg_tnumt_record_type type = mc_record->ops->type;
	struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
	struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;
	char tnumt_pl[MLXSW_REG_TNUMT_LEN];
	unsigned int num_max_entries;
	unsigned int num_entries = 0;
	u32 next_kvdl_index = 0;
	bool next_valid = false;
	int i;

	if (!list_is_last(&mc_record->list, &mc_list->records_list)) {
		struct mlxsw_sp_nve_mc_record *next_record;

		next_record = list_next_entry(mc_record, list);
		next_kvdl_index = next_record->kvdl_index;
		next_valid = true;
	}

	mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TUNNEL_PORT_NVE,
			     mc_record->kvdl_index, next_valid,
			     next_kvdl_index, mc_record->num_entries);

	/* Valid entries may be scattered in entries[]; pack them densely
	 * into consecutive payload slots.
	 */
	num_max_entries = mlxsw_sp->nve->num_max_mc_entries[mc_record->proto];
	for (i = 0; i < num_max_entries; i++) {
		struct mlxsw_sp_nve_mc_entry *mc_entry;

		mc_entry = &mc_record->entries[i];
		if (!mc_entry->valid)
			continue;
		mc_record->ops->entry_set(mc_record, mc_entry, tnumt_pl,
					  num_entries++);
	}

	WARN_ON(num_entries != mc_record->num_entries);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnumt), tnumt_pl);
}
399 
400 static bool
mlxsw_sp_nve_mc_record_is_first(struct mlxsw_sp_nve_mc_record * mc_record)401 mlxsw_sp_nve_mc_record_is_first(struct mlxsw_sp_nve_mc_record *mc_record)
402 {
403 	struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
404 	struct mlxsw_sp_nve_mc_record *first_record;
405 
406 	first_record = list_first_entry(&mc_list->records_list,
407 					struct mlxsw_sp_nve_mc_record, list);
408 
409 	return mc_record == first_record;
410 }
411 
412 static struct mlxsw_sp_nve_mc_entry *
mlxsw_sp_nve_mc_entry_find(struct mlxsw_sp_nve_mc_record * mc_record,union mlxsw_sp_l3addr * addr)413 mlxsw_sp_nve_mc_entry_find(struct mlxsw_sp_nve_mc_record *mc_record,
414 			   union mlxsw_sp_l3addr *addr)
415 {
416 	struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
417 	unsigned int num_max_entries;
418 	int i;
419 
420 	num_max_entries = nve->num_max_mc_entries[mc_record->proto];
421 	for (i = 0; i < num_max_entries; i++) {
422 		struct mlxsw_sp_nve_mc_entry *mc_entry;
423 
424 		mc_entry = &mc_record->entries[i];
425 		if (!mc_entry->valid)
426 			continue;
427 		if (mc_record->ops->entry_compare(mc_record, mc_entry, addr))
428 			return mc_entry;
429 	}
430 
431 	return NULL;
432 }
433 
/* Add @addr to a free slot of @mc_record and commit the record to the
 * device. Newly created non-first records additionally require the
 * previous record to be refreshed so its 'next' pointer reaches the new
 * one. The entry is rolled back on any failure.
 */
static int
mlxsw_sp_nve_mc_record_ip_add(struct mlxsw_sp_nve_mc_record *mc_record,
			      union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve_mc_entry *mc_entry = NULL;
	int err;

	mc_entry = mlxsw_sp_nve_mc_free_entry_find(mc_record);
	if (WARN_ON(!mc_entry))
		return -EINVAL;

	err = mc_record->ops->entry_add(mc_record, mc_entry, addr);
	if (err)
		return err;
	mc_record->num_entries++;
	mc_entry->valid = true;

	err = mlxsw_sp_nve_mc_record_refresh(mc_record);
	if (err)
		goto err_record_refresh;

	/* If this is a new record and not the first one, then we need to
	 * update the next pointer of the previous entry
	 */
	if (mc_record->num_entries != 1 ||
	    mlxsw_sp_nve_mc_record_is_first(mc_record))
		return 0;

	err = mlxsw_sp_nve_mc_record_refresh(list_prev_entry(mc_record, list));
	if (err)
		goto err_prev_record_refresh;

	return 0;

err_prev_record_refresh:
err_record_refresh:
	mc_entry->valid = false;
	mc_record->num_entries--;
	mc_record->ops->entry_del(mc_record, mc_entry);
	return err;
}
475 
/* Invalidate @mc_entry and update the device. A record that became empty
 * must first be unlinked from the hardware chain; the cases below keep
 * the FID's flood index (the first record's address) valid throughout.
 */
static void
mlxsw_sp_nve_mc_record_entry_del(struct mlxsw_sp_nve_mc_record *mc_record,
				 struct mlxsw_sp_nve_mc_entry *mc_entry)
{
	struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;

	mc_entry->valid = false;
	mc_record->num_entries--;

	/* When the record continues to exist we only need to invalidate
	 * the requested entry
	 */
	if (mc_record->num_entries != 0) {
		mlxsw_sp_nve_mc_record_refresh(mc_record);
		mc_record->ops->entry_del(mc_record, mc_entry);
		return;
	}

	/* If the record needs to be deleted, but it is not the first,
	 * then we need to make sure that the previous record no longer
	 * points to it. Remove deleted record from the list to reflect
	 * that and then re-add it at the end, so that it could be
	 * properly removed by the record destruction code
	 */
	if (!mlxsw_sp_nve_mc_record_is_first(mc_record)) {
		struct mlxsw_sp_nve_mc_record *prev_record;

		prev_record = list_prev_entry(mc_record, list);
		list_del(&mc_record->list);
		mlxsw_sp_nve_mc_record_refresh(prev_record);
		list_add_tail(&mc_record->list, &mc_list->records_list);
		mc_record->ops->entry_del(mc_record, mc_entry);
		return;
	}

	/* If the first record needs to be deleted, but the list is not
	 * singular, then the second record needs to be written in the
	 * first record's address, as this address is stored as a property
	 * of the FID
	 */
	if (mlxsw_sp_nve_mc_record_is_first(mc_record) &&
	    !list_is_singular(&mc_list->records_list)) {
		struct mlxsw_sp_nve_mc_record *next_record;

		next_record = list_next_entry(mc_record, list);
		swap(mc_record->kvdl_index, next_record->kvdl_index);
		mlxsw_sp_nve_mc_record_refresh(next_record);
		mc_record->ops->entry_del(mc_record, mc_entry);
		return;
	}

	/* This is the last case where the last remaining record needs to
	 * be deleted. Simply delete the entry
	 */
	mc_record->ops->entry_del(mc_record, mc_entry);
}
532 
533 static struct mlxsw_sp_nve_mc_record *
mlxsw_sp_nve_mc_record_find(struct mlxsw_sp_nve_mc_list * mc_list,enum mlxsw_sp_l3proto proto,union mlxsw_sp_l3addr * addr,struct mlxsw_sp_nve_mc_entry ** mc_entry)534 mlxsw_sp_nve_mc_record_find(struct mlxsw_sp_nve_mc_list *mc_list,
535 			    enum mlxsw_sp_l3proto proto,
536 			    union mlxsw_sp_l3addr *addr,
537 			    struct mlxsw_sp_nve_mc_entry **mc_entry)
538 {
539 	struct mlxsw_sp_nve_mc_record *mc_record;
540 
541 	list_for_each_entry(mc_record, &mc_list->records_list, list) {
542 		if (mc_record->proto != proto)
543 			continue;
544 
545 		*mc_entry = mlxsw_sp_nve_mc_entry_find(mc_record, addr);
546 		if (*mc_entry)
547 			return mc_record;
548 	}
549 
550 	return NULL;
551 }
552 
/* Add @addr to @mc_list: take a record with room (creating one if needed)
 * and place the address in it, dropping the record again on failure.
 */
static int mlxsw_sp_nve_mc_list_ip_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nve_mc_list *mc_list,
				       enum mlxsw_sp_l3proto proto,
				       union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve_mc_record *mc_record;
	int err;

	mc_record = mlxsw_sp_nve_mc_record_get(mlxsw_sp, mc_list, proto);
	if (IS_ERR(mc_record))
		return PTR_ERR(mc_record);

	err = mlxsw_sp_nve_mc_record_ip_add(mc_record, addr);
	if (err)
		mlxsw_sp_nve_mc_record_put(mc_record);

	return err;
}
575 
/* Remove @addr from @mc_list, destroying the record (and eventually the
 * list, via the caller) when it becomes empty. Silently ignores addresses
 * that are not present.
 */
static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nve_mc_list *mc_list,
					enum mlxsw_sp_l3proto proto,
					union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve_mc_record *mc_record;
	struct mlxsw_sp_nve_mc_entry *mc_entry;

	mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr,
						&mc_entry);
	if (!mc_record)
		return;

	mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
	mlxsw_sp_nve_mc_record_put(mc_record);
}
592 
/* Point the FID's NVE flood index at the first record of @mc_list, but
 * only once per list lifetime.
 */
static int
mlxsw_sp_nve_fid_flood_index_set(struct mlxsw_sp_fid *fid,
				 struct mlxsw_sp_nve_mc_list *mc_list)
{
	struct mlxsw_sp_nve_mc_record *mc_record;

	/* The address of the first record in the list is a property of
	 * the FID and we never change it. It only needs to be set when
	 * a new list is created
	 */
	if (mlxsw_sp_fid_nve_flood_index_is_set(fid))
		return 0;

	mc_record = list_first_entry(&mc_list->records_list,
				     struct mlxsw_sp_nve_mc_record, list);

	return mlxsw_sp_fid_nve_flood_index_set(fid, mc_record->kvdl_index);
}
611 
612 static void
mlxsw_sp_nve_fid_flood_index_clear(struct mlxsw_sp_fid * fid,struct mlxsw_sp_nve_mc_list * mc_list)613 mlxsw_sp_nve_fid_flood_index_clear(struct mlxsw_sp_fid *fid,
614 				   struct mlxsw_sp_nve_mc_list *mc_list)
615 {
616 	struct mlxsw_sp_nve_mc_record *mc_record;
617 
618 	/* The address of the first record needs to be invalidated only when
619 	 * the last record is about to be removed
620 	 */
621 	if (!list_is_singular(&mc_list->records_list))
622 		return;
623 
624 	mc_record = list_first_entry(&mc_list->records_list,
625 				     struct mlxsw_sp_nve_mc_record, list);
626 	if (mc_record->num_entries != 1)
627 		return;
628 
629 	return mlxsw_sp_fid_nve_flood_index_clear(fid);
630 }
631 
/* Add underlay destination IP @addr to the NVE flood list of @fid,
 * creating the per-FID list and setting the FID's flood index when this
 * is the first address. Returns 0 or -errno; fully rolled back on error.
 */
int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fid *fid,
			      enum mlxsw_sp_l3proto proto,
			      union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve_mc_list_key key = { 0 };
	struct mlxsw_sp_nve_mc_list *mc_list;
	int err;

	key.fid_index = mlxsw_sp_fid_index(fid);
	mc_list = mlxsw_sp_nve_mc_list_get(mlxsw_sp, &key);
	if (IS_ERR(mc_list))
		return PTR_ERR(mc_list);

	err = mlxsw_sp_nve_mc_list_ip_add(mlxsw_sp, mc_list, proto, addr);
	if (err)
		goto err_add_ip;

	err = mlxsw_sp_nve_fid_flood_index_set(fid, mc_list);
	if (err)
		goto err_fid_flood_index_set;

	return 0;

err_fid_flood_index_set:
	mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
err_add_ip:
	mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
	return err;
}
662 
/* Remove underlay destination IP @addr from the NVE flood list of @fid,
 * clearing the FID's flood index and destroying the list when this was
 * the last address. No-op if the FID has no flood list.
 */
void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fid *fid,
			       enum mlxsw_sp_l3proto proto,
			       union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve_mc_list_key key = { 0 };
	struct mlxsw_sp_nve_mc_list *mc_list;

	key.fid_index = mlxsw_sp_fid_index(fid);
	mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
	if (!mc_list)
		return;

	mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list);
	mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
	mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
}
680 
/* Delete every valid entry of @mc_record, then release the record itself
 * once it is empty.
 */
static void
mlxsw_sp_nve_mc_record_delete(struct mlxsw_sp_nve_mc_record *mc_record)
{
	struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
	unsigned int num_max_entries;
	int i;

	num_max_entries = nve->num_max_mc_entries[mc_record->proto];
	for (i = 0; i < num_max_entries; i++) {
		struct mlxsw_sp_nve_mc_entry *mc_entry = &mc_record->entries[i];

		if (!mc_entry->valid)
			continue;
		mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
	}

	WARN_ON(mc_record->num_entries);
	mlxsw_sp_nve_mc_record_put(mc_record);
}
700 
/* Tear down the entire NVE flood list of @fid: clear the FID's flood
 * index and delete every record of the per-FID multicast list.
 */
static void mlxsw_sp_nve_flood_ip_flush(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fid *fid)
{
	struct mlxsw_sp_nve_mc_record *mc_record, *tmp;
	struct mlxsw_sp_nve_mc_list_key key = { 0 };
	struct mlxsw_sp_nve_mc_list *mc_list;

	if (!mlxsw_sp_fid_nve_flood_index_is_set(fid))
		return;

	mlxsw_sp_fid_nve_flood_index_clear(fid);

	key.fid_index = mlxsw_sp_fid_index(fid);
	mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
	if (WARN_ON(!mc_list))
		return;

	list_for_each_entry_safe(mc_record, tmp, &mc_list->records_list, list)
		mlxsw_sp_nve_mc_record_delete(mc_record);

	WARN_ON(!list_empty(&mc_list->records_list));
	mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
}
724 
/* Initialize the NVE tunnel on first use; subsequent calls only bump the
 * reference count (num_nve_tunnels). A KVDL adjacency entry is reserved
 * as the tunnel index before the type-specific init runs.
 */
static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
	const struct mlxsw_sp_nve_ops *ops;
	int err;

	if (nve->num_nve_tunnels++ != 0)
		return 0;

	nve->config = *config;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				  &nve->tunnel_index);
	if (err)
		goto err_kvdl_alloc;

	ops = nve->nve_ops_arr[config->type];
	err = ops->init(nve, config);
	if (err)
		goto err_ops_init;

	return 0;

err_ops_init:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   nve->tunnel_index);
err_kvdl_alloc:
	memset(&nve->config, 0, sizeof(nve->config));
	nve->num_nve_tunnels--;
	return err;
}
757 
/* Drop one reference on the NVE tunnel; the last reference tears down the
 * type-specific state and frees the tunnel's KVDL adjacency entry.
 */
static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
	const struct mlxsw_sp_nve_ops *ops;

	ops = nve->nve_ops_arr[nve->config.type];

	if (mlxsw_sp->nve->num_nve_tunnels == 1) {
		ops->fini(nve);
		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				   nve->tunnel_index);
		memset(&nve->config, 0, sizeof(nve->config));
	}
	nve->num_nve_tunnels--;
}
773 
/* Flush all FDB records of @fid_index that point at the NVE tunnel.
 * NOTE(review): the mlxsw_reg_write() result is ignored, so a failed
 * flush goes unreported — presumably best-effort; confirm intent.
 */
static void mlxsw_sp_nve_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
					  u16 fid_index)
{
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
783 
/* Ask the type-specific NVE implementation to clear the offload
 * indication from FDB entries of @nve_dev / @vni.
 */
static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp,
					   const struct mlxsw_sp_fid *fid,
					   const struct net_device *nve_dev,
					   __be32 vni)
{
	const struct mlxsw_sp_nve_ops *ops;
	enum mlxsw_sp_nve_type type;

	if (WARN_ON(mlxsw_sp_fid_nve_type(fid, &type)))
		return;

	ops = mlxsw_sp->nve->nve_ops_arr[type];
	ops->fdb_clear_offload(nve_dev, vni);
}
798 
/* Hash key for the {MAC, FID} -> IPv6 underlay address mapping. */
struct mlxsw_sp_nve_ipv6_ht_key {
	u8 mac[ETH_ALEN];
	u16 fid_index;
};
803 
/* One {MAC, FID} -> IPv6 mapping; also linked on nve->ipv6_addr_list so
 * all mappings of a FID can be flushed without a full table walk.
 */
struct mlxsw_sp_nve_ipv6_ht_node {
	struct rhash_head ht_node;
	struct list_head list;
	struct mlxsw_sp_nve_ipv6_ht_key key;
	struct in6_addr addr6;
};
810 
/* rhashtable layout for nve->ipv6_ht (keyed by {MAC, FID}). */
static const struct rhashtable_params mlxsw_sp_nve_ipv6_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_nve_ipv6_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, key),
	.head_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, ht_node),
};
816 
/* Store @addr6 in KVDL and return its index via @p_kvdl_index. Thin
 * wrapper so other modules need not know the shared-address helper.
 */
int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp,
				    const struct in6_addr *addr6,
				    u32 *p_kvdl_index)
{
	return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, addr6, p_kvdl_index);
}
823 
/* Release the KVDL-stored @addr6 obtained via the _kvdl_set() wrapper. */
void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp,
				       const struct in6_addr *addr6)
{
	mlxsw_sp_ipv6_addr_put(mlxsw_sp, addr6);
}
829 
830 static struct mlxsw_sp_nve_ipv6_ht_node *
mlxsw_sp_nve_ipv6_ht_node_lookup(struct mlxsw_sp * mlxsw_sp,const char * mac,u16 fid_index)831 mlxsw_sp_nve_ipv6_ht_node_lookup(struct mlxsw_sp *mlxsw_sp, const char *mac,
832 				 u16 fid_index)
833 {
834 	struct mlxsw_sp_nve_ipv6_ht_key key = {};
835 
836 	ether_addr_copy(key.mac, mac);
837 	key.fid_index = fid_index;
838 	return rhashtable_lookup_fast(&mlxsw_sp->nve->ipv6_ht, &key,
839 				      mlxsw_sp_nve_ipv6_ht_params);
840 }
841 
/* Create a {MAC, FID} -> @addr6 mapping node, hash it and add it to the
 * per-NVE address list. Returns 0 or -errno.
 */
static int mlxsw_sp_nve_ipv6_ht_insert(struct mlxsw_sp *mlxsw_sp,
				       const char *mac, u16 fid_index,
				       const struct in6_addr *addr6)
{
	struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
	int err;

	ipv6_ht_node = kzalloc_obj(*ipv6_ht_node);
	if (!ipv6_ht_node)
		return -ENOMEM;

	ether_addr_copy(ipv6_ht_node->key.mac, mac);
	ipv6_ht_node->key.fid_index = fid_index;
	ipv6_ht_node->addr6 = *addr6;

	err = rhashtable_insert_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
				     mlxsw_sp_nve_ipv6_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add(&ipv6_ht_node->list, &nve->ipv6_addr_list);

	return 0;

err_rhashtable_insert:
	kfree(ipv6_ht_node);
	return err;
}
871 
/* Unlink @ipv6_ht_node from both the address list and the hash table,
 * then free it.
 */
static void
mlxsw_sp_nve_ipv6_ht_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;

	list_del(&ipv6_ht_node->list);
	rhashtable_remove_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
			       mlxsw_sp_nve_ipv6_ht_params);
	kfree(ipv6_ht_node);
}
883 
884 int
mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp * mlxsw_sp,const char * mac,u16 fid_index,const struct in6_addr * new_addr6)885 mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac,
886 				   u16 fid_index,
887 				   const struct in6_addr *new_addr6)
888 {
889 	struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
890 
891 	ASSERT_RTNL();
892 
893 	ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
894 							fid_index);
895 	if (!ipv6_ht_node)
896 		return mlxsw_sp_nve_ipv6_ht_insert(mlxsw_sp, mac, fid_index,
897 						   new_addr6);
898 
899 	mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
900 	ipv6_ht_node->addr6 = *new_addr6;
901 	return 0;
902 }
903 
/* Delete the {MAC, FID index} -> IPv6 mapping. A missing entry indicates a
 * caller bug, hence the WARN_ON. Caller must hold RTNL. Note: this only
 * removes the node; the address reference itself is not put here.
 */
void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac,
				    u16 fid_index)
{
	struct mlxsw_sp_nve_ipv6_ht_node *node;

	ASSERT_RTNL();

	node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac, fid_index);
	if (WARN_ON(!node))
		return;

	mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, node);
}
918 
/* Drop every IPv6 address mapping that belongs to the given FID, putting the
 * address reference for each. Safe iteration is required because nodes are
 * removed from the list while walking it.
 */
static void mlxsw_sp_nve_ipv6_addr_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
						u16 fid_index)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
	struct mlxsw_sp_nve_ipv6_ht_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &nve->ipv6_addr_list, list) {
		if (node->key.fid_index == fid_index) {
			mlxsw_sp_ipv6_addr_put(mlxsw_sp, &node->addr6);
			mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, node);
		}
	}
}
934 
mlxsw_sp_nve_fid_enable(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_fid * fid,struct mlxsw_sp_nve_params * params,struct netlink_ext_ack * extack)935 int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
936 			    struct mlxsw_sp_nve_params *params,
937 			    struct netlink_ext_ack *extack)
938 {
939 	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
940 	const struct mlxsw_sp_nve_ops *ops;
941 	struct mlxsw_sp_nve_config config;
942 	int err;
943 
944 	ops = nve->nve_ops_arr[params->type];
945 
946 	if (!ops->can_offload(nve, params, extack))
947 		return -EINVAL;
948 
949 	memset(&config, 0, sizeof(config));
950 	ops->nve_config(nve, params, &config);
951 	if (nve->num_nve_tunnels &&
952 	    memcmp(&config, &nve->config, sizeof(config))) {
953 		NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
954 		return -EINVAL;
955 	}
956 
957 	err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
958 	if (err) {
959 		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize NVE tunnel");
960 		return err;
961 	}
962 
963 	err = mlxsw_sp_fid_vni_set(fid, params->type, params->vni,
964 				   params->dev->ifindex);
965 	if (err) {
966 		NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID");
967 		goto err_fid_vni_set;
968 	}
969 
970 	err = ops->fdb_replay(params->dev, params->vni, extack);
971 	if (err)
972 		goto err_fdb_replay;
973 
974 	return 0;
975 
976 err_fdb_replay:
977 	mlxsw_sp_fid_vni_clear(fid);
978 err_fid_vni_set:
979 	mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
980 	return err;
981 }
982 
/* Disable NVE offload on a FID: flush all state associated with the FID
 * (flood IP entries, FDB entries, IPv6 address mappings), clear software
 * offload indications on the NVE device, unbind the VNI and release the
 * tunnel reference.
 */
void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fid *fid)
{
	u16 fid_index = mlxsw_sp_fid_index(fid);
	struct net_device *nve_dev;
	int nve_ifindex;
	__be32 vni;

	/* Necessary for __dev_get_by_index() below. */
	ASSERT_RTNL();

	mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
	mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
	mlxsw_sp_nve_ipv6_addr_flush_by_fid(mlxsw_sp, fid_index);

	/* Both ifindex and VNI were recorded on the FID when the tunnel was
	 * enabled; either missing indicates a bug.
	 */
	if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) ||
		    mlxsw_sp_fid_vni(fid, &vni)))
		goto out;

	/* The NVE device may already be gone; in that case only the hardware
	 * state below still needs to be cleared.
	 */
	nve_dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
	if (!nve_dev)
		goto out;

	mlxsw_sp_nve_fdb_clear_offload(mlxsw_sp, fid, nve_dev, vni);
	mlxsw_sp_fid_fdb_clear_offload(fid, nve_dev);

out:
	mlxsw_sp_fid_vni_clear(fid);
	mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
}
1013 
/* Per-port NVE initialization: pack and write the TNQDR register for this
 * port (per the register name, presumably the tunneling QoS default setting
 * for decapsulated packets — confirm against reg.h).
 */
int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char tnqdr_pl[MLXSW_REG_TNQDR_LEN];

	mlxsw_reg_tnqdr_pack(tnqdr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqdr), tnqdr_pl);
}
1022 
/* Per-port NVE teardown. Intentionally empty: mlxsw_sp_port_nve_init() only
 * writes a register default, which needs no explicit undo.
 */
void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}
1026 
/* Global NVE QoS initialization: pack and write the TNQCR register with its
 * default (argument-less) configuration.
 */
static int mlxsw_sp_nve_qos_init(struct mlxsw_sp *mlxsw_sp)
{
	char tnqcr_pl[MLXSW_REG_TNQCR_LEN];

	mlxsw_reg_tnqcr_pack(tnqcr_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqcr), tnqcr_pl);
}
1034 
mlxsw_sp_nve_ecn_encap_init(struct mlxsw_sp * mlxsw_sp)1035 static int mlxsw_sp_nve_ecn_encap_init(struct mlxsw_sp *mlxsw_sp)
1036 {
1037 	int i;
1038 
1039 	/* Iterate over inner ECN values */
1040 	for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
1041 		u8 outer_ecn = INET_ECN_encapsulate(0, i);
1042 		char tneem_pl[MLXSW_REG_TNEEM_LEN];
1043 		int err;
1044 
1045 		mlxsw_reg_tneem_pack(tneem_pl, i, outer_ecn);
1046 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tneem),
1047 				      tneem_pl);
1048 		if (err)
1049 			return err;
1050 	}
1051 
1052 	return 0;
1053 }
1054 
/* Program one TNDEM decapsulation entry for the given (inner, outer) ECN
 * pair: the resulting inner ECN value, and whether the packet must instead be
 * trapped (invalid combination per the decap helper).
 */
static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
					 u8 inner_ecn, u8 outer_ecn)
{
	char tndem_pl[MLXSW_REG_TNDEM_LEN];
	bool trap_en;
	u8 new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
						     &trap_en);

	mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
			     trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
}
1068 
mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp * mlxsw_sp)1069 static int mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
1070 {
1071 	int i;
1072 
1073 	/* Iterate over inner ECN values */
1074 	for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
1075 		int j;
1076 
1077 		/* Iterate over outer ECN values */
1078 		for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) {
1079 			int err;
1080 
1081 			err = __mlxsw_sp_nve_ecn_decap_init(mlxsw_sp, i, j);
1082 			if (err)
1083 				return err;
1084 		}
1085 	}
1086 
1087 	return 0;
1088 }
1089 
/* Program both directions of the tunnel ECN mapping: encapsulation first,
 * then decapsulation.
 */
static int mlxsw_sp_nve_ecn_init(struct mlxsw_sp *mlxsw_sp)
{
	int err = mlxsw_sp_nve_ecn_encap_init(mlxsw_sp);

	return err ? err : mlxsw_sp_nve_ecn_decap_init(mlxsw_sp);
}
1100 
mlxsw_sp_nve_resources_query(struct mlxsw_sp * mlxsw_sp)1101 static int mlxsw_sp_nve_resources_query(struct mlxsw_sp *mlxsw_sp)
1102 {
1103 	unsigned int max;
1104 
1105 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4) ||
1106 	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6))
1107 		return -EIO;
1108 	max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4);
1109 	mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV4] = max;
1110 	max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6);
1111 	mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV6] = max;
1112 
1113 	return 0;
1114 }
1115 
/* Allocate and initialize the NVE module: the two hash tables (multicast
 * lists and IPv6 address mappings), QoS and ECN register defaults, and the
 * device resource limits. On failure everything is unwound in reverse order.
 */
int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_nve *nve;
	int err;

	nve = kzalloc_obj(*mlxsw_sp->nve);
	if (!nve)
		return -ENOMEM;
	mlxsw_sp->nve = nve;
	nve->mlxsw_sp = mlxsw_sp;
	/* Per-ASIC ops table (Spectrum-1 vs Spectrum-2 VXLAN ops). */
	nve->nve_ops_arr = mlxsw_sp->nve_ops_arr;

	err = rhashtable_init(&nve->mc_list_ht,
			      &mlxsw_sp_nve_mc_list_ht_params);
	if (err)
		goto err_mc_rhashtable_init;

	err = rhashtable_init(&nve->ipv6_ht, &mlxsw_sp_nve_ipv6_ht_params);
	if (err)
		goto err_ipv6_rhashtable_init;

	INIT_LIST_HEAD(&nve->ipv6_addr_list);

	err = mlxsw_sp_nve_qos_init(mlxsw_sp);
	if (err)
		goto err_nve_qos_init;

	err = mlxsw_sp_nve_ecn_init(mlxsw_sp);
	if (err)
		goto err_nve_ecn_init;

	err = mlxsw_sp_nve_resources_query(mlxsw_sp);
	if (err)
		goto err_nve_resources_query;

	return 0;

err_nve_resources_query:
err_nve_ecn_init:
err_nve_qos_init:
	rhashtable_destroy(&nve->ipv6_ht);
err_ipv6_rhashtable_init:
	rhashtable_destroy(&nve->mc_list_ht);
err_mc_rhashtable_init:
	/* Clear the pointer before freeing so no stale reference remains. */
	mlxsw_sp->nve = NULL;
	kfree(nve);
	return err;
}
1164 
mlxsw_sp_nve_fini(struct mlxsw_sp * mlxsw_sp)1165 void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
1166 {
1167 	WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
1168 	WARN_ON(!list_empty(&mlxsw_sp->nve->ipv6_addr_list));
1169 	rhashtable_destroy(&mlxsw_sp->nve->ipv6_ht);
1170 	rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
1171 	kfree(mlxsw_sp->nve);
1172 	mlxsw_sp->nve = NULL;
1173 }
1174