xref: /linux/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c (revision 83a37b3292f4aca799b355179ad6fbdd78a08e10)
1 /*
2  * Copyright (C) 2017 Netronome Systems, Inc.
3  *
4  * This software is dual licensed under the GNU General Public License Version 2,
5  * June 1991 as shown in the file COPYING in the top-level directory of this
6  * source tree or the BSD 2-Clause License provided below.  You have the
7  * option to license this software under the complete terms of either license.
8  *
9  * The BSD 2-Clause License:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      1. Redistributions of source code must retain the above
16  *         copyright notice, this list of conditions and the following
17  *         disclaimer.
18  *
19  *      2. Redistributions in binary form must reproduce the above
20  *         copyright notice, this list of conditions and the following
21  *         disclaimer in the documentation and/or other materials
22  *         provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/etherdevice.h>
35 #include <linux/inetdevice.h>
36 #include <net/netevent.h>
37 #include <linux/idr.h>
38 #include <net/dst_metadata.h>
39 #include <net/arp.h>
40 
41 #include "cmsg.h"
42 #include "main.h"
43 #include "../nfp_net_repr.h"
44 #include "../nfp_net.h"
45 
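/* Maximum number of active-tunnel entries accepted in one keep-alive message. */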
46 #define NFP_FL_MAX_ROUTES               32
47 
48 /**
49  * struct nfp_tun_active_tuns - periodic message of active tunnels
50  * @seq:		sequence number of the message
51  * @count:		number of tunnels reported in message
52  * @flags:		options part of the request
53  * @ipv4:		dest IPv4 address of active route
54  * @egress_port:	port the encapsulated packet egressed
55  * @extra:		reserved for future use
56  * @tun_info:		tunnels that have sent traffic in reported period
57  */
58 struct nfp_tun_active_tuns {
59 	__be32 seq;
60 	__be32 count;
61 	__be32 flags;
62 	struct route_ip_info {
63 		__be32 ipv4;
64 		__be32 egress_port;
65 		__be32 extra[2];
66 	} tun_info[];
67 };
68 
69 /**
70  * struct nfp_tun_neigh - neighbour/route entry on the NFP
71  * @dst_ipv4:	destination IPv4 address
72  * @src_ipv4:	source IPv4 address
73  * @dst_addr:	destination MAC address
74  * @src_addr:	source MAC address
75  * @port_id:	NFP port to output packet on - associated with source IPv4
76  */
77 struct nfp_tun_neigh {
78 	__be32 dst_ipv4;
79 	__be32 src_ipv4;
80 	u8 dst_addr[ETH_ALEN];
81 	u8 src_addr[ETH_ALEN];
82 	__be32 port_id;
83 };
84 
85 /**
86  * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
87  * @ingress_port:	ingress port of packet that signalled request
88  * @ipv4_addr:		destination ipv4 address for route
89  * @reserved:		reserved for future use
90  */
91 struct nfp_tun_req_route_ipv4 {
92 	__be32 ingress_port;
93 	__be32 ipv4_addr;
94 	__be32 reserved[2];
95 };
96 
97 /**
98  * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
99  * @ipv4_addr:	destination of route
100  * @list:	list pointer
101  */
102 struct nfp_ipv4_route_entry {
103 	__be32 ipv4_addr;
104 	struct list_head list;
105 };
106 
107 #define NFP_FL_IPV4_ADDRS_MAX        32
108 
109 /**
110  * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
111  * @count:	number of IPs populated in the array
112  * @ipv4_addr:	array of NFP_FL_IPV4_ADDRS_MAX 32-bit IPv4 addresses
113  */
114 struct nfp_tun_ipv4_addr {
115 	__be32 count;
116 	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
117 };
118 
119 /**
120  * struct nfp_ipv4_addr_entry - cached IPv4 addresses
121  * @ipv4_addr:	IP address
122  * @ref_count:	number of rules currently using this IP
123  * @list:	list pointer
124  */
125 struct nfp_ipv4_addr_entry {
126 	__be32 ipv4_addr;
127 	int ref_count;
128 	struct list_head list;
129 };
130 
131 /**
132  * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
133  * @reserved:	reserved for future use
134  * @count:	number of MAC addresses in the message
135  * @index:	index of MAC address in the lookup table
136  * @addr:	interface MAC address
137  * @addresses:	series of MACs to offload
138  */
139 struct nfp_tun_mac_addr {
140 	__be16 reserved;
141 	__be16 count;
142 	struct index_mac_addr {
143 		__be16 index;
144 		u8 addr[ETH_ALEN];
145 	} addresses[];
146 };
147 
148 /**
149  * struct nfp_tun_mac_offload_entry - list of MACs to offload
150  * @index:	index of MAC address for offloading
151  * @addr:	interface MAC address
152  * @list:	list pointer
153  */
154 struct nfp_tun_mac_offload_entry {
155 	__be16 index;
156 	u8 addr[ETH_ALEN];
157 	struct list_head list;
158 };
159 
160 #define NFP_MAX_MAC_INDEX       0xff
161 
162 /**
163  * struct nfp_tun_mac_non_nfp_idx - converts non-NFP netdev ifindex to 8-bit id
164  * @ifindex:	netdev ifindex of the device
165  * @index:	index of the netdev's MAC on the NFP
166  * @list:	list pointer
167  */
168 struct nfp_tun_mac_non_nfp_idx {
169 	int ifindex;
170 	u8 index;
171 	struct list_head list;
172 };
173 
174 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
175 {
176 	struct nfp_tun_active_tuns *payload;
177 	struct net_device *netdev;
178 	int count, i, pay_len;
179 	struct neighbour *n;
180 	__be32 ipv4_addr;
181 	u32 port;
182 
183 	payload = nfp_flower_cmsg_get_data(skb);
184 	count = be32_to_cpu(payload->count);
185 	if (count > NFP_FL_MAX_ROUTES) {
186 		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
187 		return;
188 	}
189 
190 	pay_len = nfp_flower_cmsg_get_data_len(skb);
191 	if (pay_len != sizeof(struct nfp_tun_active_tuns) +
192 	    sizeof(struct route_ip_info) * count) {
193 		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
194 		return;
195 	}
196 
197 	for (i = 0; i < count; i++) {
198 		ipv4_addr = payload->tun_info[i].ipv4;
199 		port = be32_to_cpu(payload->tun_info[i].egress_port);
200 		netdev = nfp_app_repr_get(app, port);
201 		if (!netdev)
202 			continue;
203 
204 		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
205 		if (!n)
206 			continue;
207 
208 		/* Update the used timestamp of neighbour */
209 		neigh_event_send(n, NULL);
210 		neigh_release(n);
211 	}
212 }
213 
214 static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
215 {
216 	if (!netdev->rtnl_link_ops)
217 		return false;
218 	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
219 		return true;
220 	if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
221 		return true;
222 
223 	return false;
224 }
225 
226 static int
227 nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata)
228 {
229 	struct sk_buff *skb;
230 	unsigned char *msg;
231 
232 	skb = nfp_flower_cmsg_alloc(app, plen, mtype);
233 	if (!skb)
234 		return -ENOMEM;
235 
236 	msg = nfp_flower_cmsg_get_data(skb);
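	/* The cmsg data area was sized to plen by nfp_flower_cmsg_alloc(), so
	 * this copies the caller's payload in full.
	 */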
237 	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));
238 
239 	nfp_ctrl_tx(app->ctrl, skb);
240 	return 0;
241 }
242 
243 static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
244 {
245 	struct nfp_flower_priv *priv = app->priv;
246 	struct nfp_ipv4_route_entry *entry;
247 	struct list_head *ptr, *storage;
248 
249 	mutex_lock(&priv->nfp_neigh_off_lock);
250 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
251 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
252 		if (entry->ipv4_addr == ipv4_addr) {
253 			mutex_unlock(&priv->nfp_neigh_off_lock);
254 			return true;
255 		}
256 	}
257 	mutex_unlock(&priv->nfp_neigh_off_lock);
258 	return false;
259 }
260 
261 static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
262 {
263 	struct nfp_flower_priv *priv = app->priv;
264 	struct nfp_ipv4_route_entry *entry;
265 	struct list_head *ptr, *storage;
266 
267 	mutex_lock(&priv->nfp_neigh_off_lock);
268 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
269 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
270 		if (entry->ipv4_addr == ipv4_addr) {
271 			mutex_unlock(&priv->nfp_neigh_off_lock);
272 			return;
273 		}
274 	}
275 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
276 	if (!entry) {
277 		mutex_unlock(&priv->nfp_neigh_off_lock);
278 		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
279 		return;
280 	}
281 
282 	entry->ipv4_addr = ipv4_addr;
283 	list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
284 	mutex_unlock(&priv->nfp_neigh_off_lock);
285 }
286 
287 static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
288 {
289 	struct nfp_flower_priv *priv = app->priv;
290 	struct nfp_ipv4_route_entry *entry;
291 	struct list_head *ptr, *storage;
292 
293 	mutex_lock(&priv->nfp_neigh_off_lock);
294 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
295 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
296 		if (entry->ipv4_addr == ipv4_addr) {
297 			list_del(&entry->list);
298 			kfree(entry);
299 			break;
300 		}
301 	}
302 	mutex_unlock(&priv->nfp_neigh_off_lock);
303 }
304 
305 static void
306 nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
307 		    struct flowi4 *flow, struct neighbour *neigh)
308 {
309 	struct nfp_tun_neigh payload;
310 
311 	/* Only offload representor IPv4s for now. */
312 	if (!nfp_netdev_is_nfp_repr(netdev))
313 		return;
314 
315 	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
316 	payload.dst_ipv4 = flow->daddr;
317 
318 	/* If entry has expired send dst IP with all other fields 0. */
319 	if (!(neigh->nud_state & NUD_VALID)) {
320 		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
321 		/* Trigger ARP to verify invalid neighbour state. */
322 		neigh_event_send(neigh, NULL);
323 		goto send_msg;
324 	}
325 
326 	/* Have a valid neighbour so populate rest of entry. */
327 	payload.src_ipv4 = flow->saddr;
328 	ether_addr_copy(payload.src_addr, netdev->dev_addr);
329 	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
330 	payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
331 	/* Add destination of new route to the driver's route cache. */
332 	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
333 
334 send_msg:
335 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
336 				 sizeof(struct nfp_tun_neigh),
337 				 (unsigned char *)&payload);
338 }
339 
340 static int
341 nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
342 			    void *ptr)
343 {
344 	struct nfp_flower_priv *app_priv;
345 	struct netevent_redirect *redir;
346 	struct flowi4 flow = {};
347 	struct neighbour *n;
348 	struct nfp_app *app;
349 	struct rtable *rt;
350 	int err;
351 
352 	switch (event) {
353 	case NETEVENT_REDIRECT:
354 		redir = (struct netevent_redirect *)ptr;
355 		n = redir->neigh;
356 		break;
357 	case NETEVENT_NEIGH_UPDATE:
358 		n = (struct neighbour *)ptr;
359 		break;
360 	default:
361 		return NOTIFY_DONE;
362 	}
363 
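	/* For arp_tbl entries the neighbour's primary key is its IPv4 address. */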
364 	flow.daddr = *(__be32 *)n->primary_key;
365 
366 	/* Only concerned with route changes for representors. */
367 	if (!nfp_netdev_is_nfp_repr(n->dev))
368 		return NOTIFY_DONE;
369 
370 	app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
371 	app = app_priv->app;
372 
373 	/* Only concerned with changes to routes already added to NFP. */
374 	if (!nfp_tun_has_route(app, flow.daddr))
375 		return NOTIFY_DONE;
376 
377 #if IS_ENABLED(CONFIG_INET)
378 	/* Do a route lookup to populate flow data. */
379 	rt = ip_route_output_key(dev_net(n->dev), &flow);
380 	err = PTR_ERR_OR_ZERO(rt);
381 	if (err)
382 		return NOTIFY_DONE;
383 #else
384 	return NOTIFY_DONE;
385 #endif
386 
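	/* Offloaded tunnel traffic (e.g. VXLAN) is UDP encapsulated. */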
387 	flow.flowi4_proto = IPPROTO_UDP;
388 	nfp_tun_write_neigh(n->dev, app, &flow, n);
389 
390 	return NOTIFY_OK;
391 }
392 
393 void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
394 {
395 	struct nfp_tun_req_route_ipv4 *payload;
396 	struct net_device *netdev;
397 	struct flowi4 flow = {};
398 	struct neighbour *n;
399 	struct rtable *rt;
400 	int err;
401 
402 	payload = nfp_flower_cmsg_get_data(skb);
403 
404 	netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
405 	if (!netdev)
406 		goto route_fail_warning;
407 
408 	flow.daddr = payload->ipv4_addr;
409 	flow.flowi4_proto = IPPROTO_UDP;
410 
411 #if IS_ENABLED(CONFIG_INET)
412 	/* Do a route lookup in the same namespace as the ingress port. */
413 	rt = ip_route_output_key(dev_net(netdev), &flow);
414 	err = PTR_ERR_OR_ZERO(rt);
415 	if (err)
416 		goto route_fail_warning;
417 #else
418 	goto route_fail_warning;
419 #endif
420 
421 	/* Get the neighbour entry for the lookup */
422 	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
423 	ip_rt_put(rt);
424 	if (!n)
425 		goto route_fail_warning;
426 	nfp_tun_write_neigh(n->dev, app, &flow, n);
427 	neigh_release(n);
428 	return;
429 
430 route_fail_warning:
431 	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
432 }
433 
434 static void nfp_tun_write_ipv4_list(struct nfp_app *app)
435 {
436 	struct nfp_flower_priv *priv = app->priv;
437 	struct nfp_ipv4_addr_entry *entry;
438 	struct nfp_tun_ipv4_addr payload;
439 	struct list_head *ptr, *storage;
440 	int count;
441 
442 	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
443 	mutex_lock(&priv->nfp_ipv4_off_lock);
444 	count = 0;
445 	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
446 		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
447 			mutex_unlock(&priv->nfp_ipv4_off_lock);
448 			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
449 			return;
450 		}
451 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
452 		payload.ipv4_addr[count++] = entry->ipv4_addr;
453 	}
454 	payload.count = cpu_to_be32(count);
455 	mutex_unlock(&priv->nfp_ipv4_off_lock);
456 
457 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
458 				 sizeof(struct nfp_tun_ipv4_addr),
459 				 &payload);
460 }
461 
462 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
463 {
464 	struct nfp_flower_priv *priv = app->priv;
465 	struct nfp_ipv4_addr_entry *entry;
466 	struct list_head *ptr, *storage;
467 
468 	mutex_lock(&priv->nfp_ipv4_off_lock);
469 	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
470 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
471 		if (entry->ipv4_addr == ipv4) {
472 			entry->ref_count++;
473 			mutex_unlock(&priv->nfp_ipv4_off_lock);
474 			return;
475 		}
476 	}
477 
478 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
479 	if (!entry) {
480 		mutex_unlock(&priv->nfp_ipv4_off_lock);
481 		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
482 		return;
483 	}
484 	entry->ipv4_addr = ipv4;
485 	entry->ref_count = 1;
486 	list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
487 	mutex_unlock(&priv->nfp_ipv4_off_lock);
488 
489 	nfp_tun_write_ipv4_list(app);
490 }
491 
492 void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
493 {
494 	struct nfp_flower_priv *priv = app->priv;
495 	struct nfp_ipv4_addr_entry *entry;
496 	struct list_head *ptr, *storage;
497 
498 	mutex_lock(&priv->nfp_ipv4_off_lock);
499 	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
500 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
501 		if (entry->ipv4_addr == ipv4) {
502 			entry->ref_count--;
503 			if (!entry->ref_count) {
504 				list_del(&entry->list);
505 				kfree(entry);
506 			}
507 			break;
508 		}
509 	}
510 	mutex_unlock(&priv->nfp_ipv4_off_lock);
511 
512 	nfp_tun_write_ipv4_list(app);
513 }
514 
515 void nfp_tunnel_write_macs(struct nfp_app *app)
516 {
517 	struct nfp_flower_priv *priv = app->priv;
518 	struct nfp_tun_mac_offload_entry *entry;
519 	struct nfp_tun_mac_addr *payload;
520 	struct list_head *ptr, *storage;
521 	int mac_count, err, pay_size;
522 
523 	mutex_lock(&priv->nfp_mac_off_lock);
524 	if (!priv->nfp_mac_off_count) {
525 		mutex_unlock(&priv->nfp_mac_off_lock);
526 		return;
527 	}
528 
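	/* The message carries a variable number of index/MAC pairs. */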
529 	pay_size = sizeof(struct nfp_tun_mac_addr) +
530 		   sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;
531 
532 	payload = kzalloc(pay_size, GFP_KERNEL);
533 	if (!payload) {
534 		mutex_unlock(&priv->nfp_mac_off_lock);
535 		return;
536 	}
537 
538 	payload->count = cpu_to_be16(priv->nfp_mac_off_count);
539 
540 	mac_count = 0;
541 	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
542 		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
543 				   list);
544 		payload->addresses[mac_count].index = entry->index;
545 		ether_addr_copy(payload->addresses[mac_count].addr,
546 				entry->addr);
547 		mac_count++;
548 	}
549 
550 	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
551 				       pay_size, payload);
552 
553 	kfree(payload);
554 
555 	if (err) {
556 		mutex_unlock(&priv->nfp_mac_off_lock);
557 		/* Write failed so retain list for future retry. */
558 		return;
559 	}
560 
561 	/* If list was successfully offloaded, flush it. */
562 	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
563 		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
564 				   list);
565 		list_del(&entry->list);
566 		kfree(entry);
567 	}
568 
569 	priv->nfp_mac_off_count = 0;
570 	mutex_unlock(&priv->nfp_mac_off_lock);
571 }
572 
573 static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
574 {
575 	struct nfp_flower_priv *priv = app->priv;
576 	struct nfp_tun_mac_non_nfp_idx *entry;
577 	struct list_head *ptr, *storage;
578 	int idx;
579 
580 	mutex_lock(&priv->nfp_mac_index_lock);
581 	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
582 		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
583 		if (entry->ifindex == ifindex) {
584 			idx = entry->index;
585 			mutex_unlock(&priv->nfp_mac_index_lock);
586 			return idx;
587 		}
588 	}
589 
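	/* No existing mapping, allocate a new 8-bit index for this ifindex. */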
590 	idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
591 			     NFP_MAX_MAC_INDEX, GFP_KERNEL);
592 	if (idx < 0) {
593 		mutex_unlock(&priv->nfp_mac_index_lock);
594 		return idx;
595 	}
596 
597 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
598 	if (!entry) {
599 		mutex_unlock(&priv->nfp_mac_index_lock);
600 		return -ENOMEM;
601 	}
602 	entry->ifindex = ifindex;
603 	entry->index = idx;
604 	list_add_tail(&entry->list, &priv->nfp_mac_index_list);
605 	mutex_unlock(&priv->nfp_mac_index_lock);
606 
607 	return idx;
608 }
609 
610 static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
611 {
612 	struct nfp_flower_priv *priv = app->priv;
613 	struct nfp_tun_mac_non_nfp_idx *entry;
614 	struct list_head *ptr, *storage;
615 
616 	mutex_lock(&priv->nfp_mac_index_lock);
617 	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
618 		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
619 		if (entry->ifindex == ifindex) {
620 			ida_simple_remove(&priv->nfp_mac_off_ids,
621 					  entry->index);
622 			list_del(&entry->list);
623 			kfree(entry);
624 			break;
625 		}
626 	}
627 	mutex_unlock(&priv->nfp_mac_index_lock);
628 }
629 
630 static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
631 					    struct nfp_app *app)
632 {
633 	struct nfp_flower_priv *priv = app->priv;
634 	struct nfp_tun_mac_offload_entry *entry;
635 	u16 nfp_mac_idx;
636 	int port = 0;
637 
638 	/* Check if MAC should be offloaded. */
639 	if (!is_valid_ether_addr(netdev->dev_addr))
640 		return;
641 
642 	if (nfp_netdev_is_nfp_repr(netdev))
643 		port = nfp_repr_get_port_id(netdev);
644 	else if (!nfp_tun_is_netdev_to_offload(netdev))
645 		return;
646 
647 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
648 	if (!entry) {
649 		nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
650 		return;
651 	}
652 
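	/* The 16-bit MAC index encodes the port type in its low byte; physical
	 * and PCIe reprs carry their port/vNIC number in the upper byte, while
	 * other offloadable netdevs get a driver-allocated id.
	 */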
653 	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
654 	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
655 		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
656 	} else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
657 		   NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
658 		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
659 		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
660 	} else {
661 		/* Must assign our own unique 8-bit index. */
662 		int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
663 
664 		if (idx < 0) {
665 			nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
666 			kfree(entry);
667 			return;
668 		}
669 		nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
670 	}
671 
672 	entry->index = cpu_to_be16(nfp_mac_idx);
673 	ether_addr_copy(entry->addr, netdev->dev_addr);
674 
675 	mutex_lock(&priv->nfp_mac_off_lock);
676 	priv->nfp_mac_off_count++;
677 	list_add_tail(&entry->list, &priv->nfp_mac_off_list);
678 	mutex_unlock(&priv->nfp_mac_off_lock);
679 }
680 
681 static int nfp_tun_mac_event_handler(struct notifier_block *nb,
682 				     unsigned long event, void *ptr)
683 {
684 	struct nfp_flower_priv *app_priv;
685 	struct net_device *netdev;
686 	struct nfp_app *app;
687 
688 	if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
689 		app_priv = container_of(nb, struct nfp_flower_priv,
690 					nfp_tun_mac_nb);
691 		app = app_priv->app;
692 		netdev = netdev_notifier_info_to_dev(ptr);
693 
694 		/* If non-nfp netdev then free its offload index. */
695 		if (nfp_tun_is_netdev_to_offload(netdev))
696 			nfp_tun_del_mac_idx(app, netdev->ifindex);
697 	} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
698 		   event == NETDEV_REGISTER) {
699 		app_priv = container_of(nb, struct nfp_flower_priv,
700 					nfp_tun_mac_nb);
701 		app = app_priv->app;
702 		netdev = netdev_notifier_info_to_dev(ptr);
703 
704 		nfp_tun_add_to_mac_offload_list(netdev, app);
705 
706 		/* Force a list write to keep NFP up to date. */
707 		nfp_tunnel_write_macs(app);
708 	}
709 	return NOTIFY_OK;
710 }
711 
712 int nfp_tunnel_config_start(struct nfp_app *app)
713 {
714 	struct nfp_flower_priv *priv = app->priv;
715 	struct net_device *netdev;
716 	int err;
717 
718 	/* Initialise priv data for MAC offloading. */
719 	priv->nfp_mac_off_count = 0;
720 	mutex_init(&priv->nfp_mac_off_lock);
721 	INIT_LIST_HEAD(&priv->nfp_mac_off_list);
722 	priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
723 	mutex_init(&priv->nfp_mac_index_lock);
724 	INIT_LIST_HEAD(&priv->nfp_mac_index_list);
725 	ida_init(&priv->nfp_mac_off_ids);
726 
727 	/* Initialise priv data for IPv4 offloading. */
728 	mutex_init(&priv->nfp_ipv4_off_lock);
729 	INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
730 
731 	/* Initialise priv data for neighbour offloading. */
732 	mutex_init(&priv->nfp_neigh_off_lock);
733 	INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
734 	priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
735 
736 	err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
737 	if (err)
738 		goto err_free_mac_ida;
739 
740 	err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
741 	if (err)
742 		goto err_unreg_mac_nb;
743 
744 	/* Parse netdevs already registered for MACs that need to be offloaded. */
745 	rtnl_lock();
746 	for_each_netdev(&init_net, netdev)
747 		nfp_tun_add_to_mac_offload_list(netdev, app);
748 	rtnl_unlock();
749 
750 	return 0;
751 
752 err_unreg_mac_nb:
753 	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
754 err_free_mac_ida:
755 	ida_destroy(&priv->nfp_mac_off_ids);
756 	return err;
757 }
758 
759 void nfp_tunnel_config_stop(struct nfp_app *app)
760 {
761 	struct nfp_tun_mac_offload_entry *mac_entry;
762 	struct nfp_flower_priv *priv = app->priv;
763 	struct nfp_ipv4_route_entry *route_entry;
764 	struct nfp_tun_mac_non_nfp_idx *mac_idx;
765 	struct nfp_ipv4_addr_entry *ip_entry;
766 	struct list_head *ptr, *storage;
767 
768 	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
769 	unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
770 
771 	/* Free any memory that may be occupied by MAC list. */
772 	mutex_lock(&priv->nfp_mac_off_lock);
773 	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
774 		mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
775 				       list);
776 		list_del(&mac_entry->list);
777 		kfree(mac_entry);
778 	}
779 	mutex_unlock(&priv->nfp_mac_off_lock);
780 
781 	/* Free any memory that may be occupied by MAC index list. */
782 	mutex_lock(&priv->nfp_mac_index_lock);
783 	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
784 		mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
785 				     list);
786 		list_del(&mac_idx->list);
787 		kfree(mac_idx);
788 	}
789 	mutex_unlock(&priv->nfp_mac_index_lock);
790 
791 	ida_destroy(&priv->nfp_mac_off_ids);
792 
793 	/* Free any memory that may be occupied by ipv4 list. */
794 	mutex_lock(&priv->nfp_ipv4_off_lock);
795 	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
796 		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
797 		list_del(&ip_entry->list);
798 		kfree(ip_entry);
799 	}
800 	mutex_unlock(&priv->nfp_ipv4_off_lock);
801 
802 	/* Free any memory that may be occupied by the route list. */
803 	mutex_lock(&priv->nfp_neigh_off_lock);
804 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
805 		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
806 					 list);
807 		list_del(&route_entry->list);
808 		kfree(route_entry);
809 	}
810 	mutex_unlock(&priv->nfp_neigh_off_lock);
811 }
812