xref: /linux/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c (revision fe8ecccc10b3adc071de05ca7af728ca1a4ac9aa)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/inetdevice.h>
6 #include <net/netevent.h>
7 #include <linux/idr.h>
8 #include <net/dst_metadata.h>
9 #include <net/arp.h>
10 
11 #include "cmsg.h"
12 #include "main.h"
13 #include "../nfp_net_repr.h"
14 #include "../nfp_net.h"
15 
16 #define NFP_FL_MAX_ROUTES               32
17 
18 /**
19  * struct nfp_tun_active_tuns - periodic message of active tunnels
20  * @seq:		sequence number of the message
21  * @count:		number of tunnels reported in the message
22  * @flags:		options part of the request
23  * @tun_info.ipv4:		dest IPv4 address of active route
24  * @tun_info.egress_port:	port the encapsulated packet egressed
25  * @tun_info.extra:		reserved for future use
26  * @tun_info:		tunnels that have sent traffic in reported period
27  */
28 struct nfp_tun_active_tuns {
29 	__be32 seq;
30 	__be32 count;
31 	__be32 flags;
32 	struct route_ip_info {
33 		__be32 ipv4;
34 		__be32 egress_port;
35 		__be32 extra[2];
36 	} tun_info[];
37 };
38 
39 /**
40  * struct nfp_tun_neigh - neighbour/route entry on the NFP
41  * @dst_ipv4:	destination IPv4 address
42  * @src_ipv4:	source IPv4 address
43  * @dst_addr:	destination MAC address
44  * @src_addr:	source MAC address
45  * @port_id:	NFP port to output packet on - associated with source IPv4
46  */
47 struct nfp_tun_neigh {
48 	__be32 dst_ipv4;
49 	__be32 src_ipv4;
50 	u8 dst_addr[ETH_ALEN];
51 	u8 src_addr[ETH_ALEN];
52 	__be32 port_id;
53 };
54 
55 /**
56  * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
57  * @ingress_port:	ingress port of packet that signalled request
58  * @ipv4_addr:		destination ipv4 address for route
59  * @reserved:		reserved for future use
60  */
61 struct nfp_tun_req_route_ipv4 {
62 	__be32 ingress_port;
63 	__be32 ipv4_addr;
64 	__be32 reserved[2];
65 };
66 
67 /**
68  * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
69  * @ipv4_addr:	destination of route
70  * @list:	list pointer
71  */
72 struct nfp_ipv4_route_entry {
73 	__be32 ipv4_addr;
74 	struct list_head list;
75 };
76 
77 #define NFP_FL_IPV4_ADDRS_MAX        32
78 
79 /**
80  * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
81  * @count:	number of IPs populated in the array
82  * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
83  */
84 struct nfp_tun_ipv4_addr {
85 	__be32 count;
86 	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
87 };
88 
89 /**
90  * struct nfp_ipv4_addr_entry - cached IPv4 addresses
91  * @ipv4_addr:	IP address
92  * @ref_count:	number of rules currently using this IP
93  * @list:	list pointer
94  */
95 struct nfp_ipv4_addr_entry {
96 	__be32 ipv4_addr;
97 	int ref_count;
98 	struct list_head list;
99 };
100 
101 /**
102  * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
103  * @reserved:	reserved for future use
104  * @count:	number of MAC addresses in the message
105  * @addresses.index:	index of MAC address in the lookup table
106  * @addresses.addr:	interface MAC address
107  * @addresses:	series of MACs to offload
108  */
109 struct nfp_tun_mac_addr {
110 	__be16 reserved;
111 	__be16 count;
112 	struct index_mac_addr {
113 		__be16 index;
114 		u8 addr[ETH_ALEN];
115 	} addresses[];
116 };
117 
118 /**
119  * struct nfp_tun_mac_offload_entry - list of MACs to offload
120  * @index:	index of MAC address for offloading
121  * @addr:	interface MAC address
122  * @list:	list pointer
123  */
124 struct nfp_tun_mac_offload_entry {
125 	__be16 index;
126 	u8 addr[ETH_ALEN];
127 	struct list_head list;
128 };
129 
130 #define NFP_MAX_MAC_INDEX       0xff
131 
132 /**
133  * struct nfp_tun_mac_non_nfp_idx - maps a non-NFP netdev ifindex to an 8-bit id
134  * @ifindex:	netdev ifindex of the device
135  * @index:	index of the netdev's MAC on the NFP
136  * @list:	list pointer
137  */
138 struct nfp_tun_mac_non_nfp_idx {
139 	int ifindex;
140 	u8 index;
141 	struct list_head list;
142 };
143 
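/* Handle the periodic active-tunnels message from the firmware. Each entry
 * names a tunnel destination and egress port that carried traffic recently;
 * refreshing the matching kernel neighbour keeps it from going stale while
 * the offloaded tunnel is still in use.
 */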
144 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
145 {
146 	struct nfp_tun_active_tuns *payload;
147 	struct net_device *netdev;
148 	int count, i, pay_len;
149 	struct neighbour *n;
150 	__be32 ipv4_addr;
151 	u32 port;
152 
153 	payload = nfp_flower_cmsg_get_data(skb);
154 	count = be32_to_cpu(payload->count);
155 	if (count > NFP_FL_MAX_ROUTES) {
156 		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
157 		return;
158 	}
159 
160 	pay_len = nfp_flower_cmsg_get_data_len(skb);
161 	if (pay_len != sizeof(struct nfp_tun_active_tuns) +
162 	    sizeof(struct route_ip_info) * count) {
163 		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
164 		return;
165 	}
166 
167 	for (i = 0; i < count; i++) {
168 		ipv4_addr = payload->tun_info[i].ipv4;
169 		port = be32_to_cpu(payload->tun_info[i].egress_port);
170 		netdev = nfp_app_repr_get(app, port);
171 		if (!netdev)
172 			continue;
173 
174 		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
175 		if (!n)
176 			continue;
177 
178 		/* Update the used timestamp of neighbour */
179 		neigh_event_send(n, NULL);
180 		neigh_release(n);
181 	}
182 }
183 
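/* Besides NFP representors, only OVS internal ports and VXLAN netdevs are
 * treated as possible tunnel endpoints whose MAC addresses get offloaded.
 */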
184 static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
185 {
186 	if (!netdev->rtnl_link_ops)
187 		return false;
188 	if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
189 		return true;
190 	if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
191 		return true;
192 
193 	return false;
194 }
195 
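/* Wrap @pdata in a flower control message of type @mtype and queue it on the
 * control vNIC.
 */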
196 static int
197 nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
198 			 gfp_t flag)
199 {
200 	struct sk_buff *skb;
201 	unsigned char *msg;
202 
203 	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
204 	if (!skb)
205 		return -ENOMEM;
206 
207 	msg = nfp_flower_cmsg_get_data(skb);
208 	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));
209 
210 	nfp_ctrl_tx(app->ctrl, skb);
211 	return 0;
212 }
213 
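/* nfp_neigh_off_list caches the destination IPs whose next-hop entries have
 * been written to the NFP, so the netevent handler can tell which neighbour
 * updates matter. A bottom-half spinlock protects the list because it is
 * looked up and updated from notifier (atomic) context.
 */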
214 static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
215 {
216 	struct nfp_flower_priv *priv = app->priv;
217 	struct nfp_ipv4_route_entry *entry;
218 	struct list_head *ptr, *storage;
219 
220 	spin_lock_bh(&priv->nfp_neigh_off_lock);
221 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
222 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
223 		if (entry->ipv4_addr == ipv4_addr) {
224 			spin_unlock_bh(&priv->nfp_neigh_off_lock);
225 			return true;
226 		}
227 	}
228 	spin_unlock_bh(&priv->nfp_neigh_off_lock);
229 	return false;
230 }
231 
232 static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
233 {
234 	struct nfp_flower_priv *priv = app->priv;
235 	struct nfp_ipv4_route_entry *entry;
236 	struct list_head *ptr, *storage;
237 
238 	spin_lock_bh(&priv->nfp_neigh_off_lock);
239 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
240 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
241 		if (entry->ipv4_addr == ipv4_addr) {
242 			spin_unlock_bh(&priv->nfp_neigh_off_lock);
243 			return;
244 		}
245 	}
246 	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
247 	if (!entry) {
248 		spin_unlock_bh(&priv->nfp_neigh_off_lock);
249 		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
250 		return;
251 	}
252 
253 	entry->ipv4_addr = ipv4_addr;
254 	list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
255 	spin_unlock_bh(&priv->nfp_neigh_off_lock);
256 }
257 
258 static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
259 {
260 	struct nfp_flower_priv *priv = app->priv;
261 	struct nfp_ipv4_route_entry *entry;
262 	struct list_head *ptr, *storage;
263 
264 	spin_lock_bh(&priv->nfp_neigh_off_lock);
265 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
266 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
267 		if (entry->ipv4_addr == ipv4_addr) {
268 			list_del(&entry->list);
269 			kfree(entry);
270 			break;
271 		}
272 	}
273 	spin_unlock_bh(&priv->nfp_neigh_off_lock);
274 }
275 
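/* Send a TUN_NEIGH entry describing the next hop for @flow->daddr. If the
 * kernel neighbour is no longer valid, only the destination IP is sent (all
 * other fields zero) to signal that the entry is stale, and ARP is
 * re-triggered to resolve it again.
 */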
276 static void
277 nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
278 		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
279 {
280 	struct nfp_tun_neigh payload;
281 
282 	/* Only offload representor IPv4s for now. */
283 	if (!nfp_netdev_is_nfp_repr(netdev))
284 		return;
285 
286 	memset(&payload, 0, sizeof(struct nfp_tun_neigh));
287 	payload.dst_ipv4 = flow->daddr;
288 
289 	/* If entry has expired send dst IP with all other fields 0. */
290 	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
291 		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
292 		/* Trigger ARP to verify invalid neighbour state. */
293 		neigh_event_send(neigh, NULL);
294 		goto send_msg;
295 	}
296 
297 	/* Have a valid neighbour so populate rest of entry. */
298 	payload.src_ipv4 = flow->saddr;
299 	ether_addr_copy(payload.src_addr, netdev->dev_addr);
300 	neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
301 	payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
302 	/* Add destination of new route to NFP cache. */
303 	nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
304 
305 send_msg:
306 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
307 				 sizeof(struct nfp_tun_neigh),
308 				 (unsigned char *)&payload, flag);
309 }
310 
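/* Netevent notifier: reacts to neighbour updates and redirects. Only
 * neighbours on representor netdevs whose destination is already cached in
 * nfp_neigh_off_list are re-sent to the firmware. This may run in atomic
 * context, hence the GFP_ATOMIC send below.
 */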
311 static int
312 nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
313 			    void *ptr)
314 {
315 	struct nfp_flower_priv *app_priv;
316 	struct netevent_redirect *redir;
317 	struct flowi4 flow = {};
318 	struct neighbour *n;
319 	struct nfp_app *app;
320 	struct rtable *rt;
321 	int err;
322 
323 	switch (event) {
324 	case NETEVENT_REDIRECT:
325 		redir = (struct netevent_redirect *)ptr;
326 		n = redir->neigh;
327 		break;
328 	case NETEVENT_NEIGH_UPDATE:
329 		n = (struct neighbour *)ptr;
330 		break;
331 	default:
332 		return NOTIFY_DONE;
333 	}
334 
335 	flow.daddr = *(__be32 *)n->primary_key;
336 
337 	/* Only concerned with route changes for representors. */
338 	if (!nfp_netdev_is_nfp_repr(n->dev))
339 		return NOTIFY_DONE;
340 
341 	app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
342 	app = app_priv->app;
343 
344 	/* Only concerned with changes to routes already added to NFP. */
345 	if (!nfp_tun_has_route(app, flow.daddr))
346 		return NOTIFY_DONE;
347 
348 #if IS_ENABLED(CONFIG_INET)
349 	/* Do a route lookup to populate flow data. */
350 	rt = ip_route_output_key(dev_net(n->dev), &flow);
351 	err = PTR_ERR_OR_ZERO(rt);
352 	if (err)
353 		return NOTIFY_DONE;
354 
355 	ip_rt_put(rt);
356 #else
357 	return NOTIFY_DONE;
358 #endif
359 
360 	flow.flowi4_proto = IPPROTO_UDP;
361 	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
362 
363 	return NOTIFY_OK;
364 }
365 
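/* Handle a route lookup request from the firmware: resolve the destination
 * through the kernel routing table and neighbour cache in the ingress port's
 * namespace, then answer with a TUN_NEIGH message via nfp_tun_write_neigh().
 */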
366 void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
367 {
368 	struct nfp_tun_req_route_ipv4 *payload;
369 	struct net_device *netdev;
370 	struct flowi4 flow = {};
371 	struct neighbour *n;
372 	struct rtable *rt;
373 	int err;
374 
375 	payload = nfp_flower_cmsg_get_data(skb);
376 
377 	netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
378 	if (!netdev)
379 		goto route_fail_warning;
380 
381 	flow.daddr = payload->ipv4_addr;
382 	flow.flowi4_proto = IPPROTO_UDP;
383 
384 #if IS_ENABLED(CONFIG_INET)
385 	/* Do a route lookup on same namespace as ingress port. */
386 	rt = ip_route_output_key(dev_net(netdev), &flow);
387 	err = PTR_ERR_OR_ZERO(rt);
388 	if (err)
389 		goto route_fail_warning;
390 #else
391 	goto route_fail_warning;
392 #endif
393 
394 	/* Get the neighbour entry for the lookup */
395 	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
396 	ip_rt_put(rt);
397 	if (!n)
398 		goto route_fail_warning;
399 	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
400 	neigh_release(n);
401 	return;
402 
403 route_fail_warning:
404 	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
405 }
406 
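/* Send the full list of offloaded tunnel endpoint IPv4 addresses to the
 * firmware; the complete list is rewritten on every add or delete.
 */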
407 static void nfp_tun_write_ipv4_list(struct nfp_app *app)
408 {
409 	struct nfp_flower_priv *priv = app->priv;
410 	struct nfp_ipv4_addr_entry *entry;
411 	struct nfp_tun_ipv4_addr payload;
412 	struct list_head *ptr, *storage;
413 	int count;
414 
415 	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
416 	mutex_lock(&priv->nfp_ipv4_off_lock);
417 	count = 0;
418 	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
419 		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
420 			mutex_unlock(&priv->nfp_ipv4_off_lock);
421 			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
422 			return;
423 		}
424 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
425 		payload.ipv4_addr[count++] = entry->ipv4_addr;
426 	}
427 	payload.count = cpu_to_be32(count);
428 	mutex_unlock(&priv->nfp_ipv4_off_lock);
429 
430 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
431 				 sizeof(struct nfp_tun_ipv4_addr),
432 				 &payload, GFP_KERNEL);
433 }
434 
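/* Tunnel IPv4 addresses used by offloaded rules are reference counted so an
 * address stays on the NFP until the last rule using it is removed; the list
 * is resent to the firmware on every change.
 */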
435 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
436 {
437 	struct nfp_flower_priv *priv = app->priv;
438 	struct nfp_ipv4_addr_entry *entry;
439 	struct list_head *ptr, *storage;
440 
441 	mutex_lock(&priv->nfp_ipv4_off_lock);
442 	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
443 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
444 		if (entry->ipv4_addr == ipv4) {
445 			entry->ref_count++;
446 			mutex_unlock(&priv->nfp_ipv4_off_lock);
447 			return;
448 		}
449 	}
450 
451 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
452 	if (!entry) {
453 		mutex_unlock(&priv->nfp_ipv4_off_lock);
454 		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
455 		return;
456 	}
457 	entry->ipv4_addr = ipv4;
458 	entry->ref_count = 1;
459 	list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
460 	mutex_unlock(&priv->nfp_ipv4_off_lock);
461 
462 	nfp_tun_write_ipv4_list(app);
463 }
464 
465 void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
466 {
467 	struct nfp_flower_priv *priv = app->priv;
468 	struct nfp_ipv4_addr_entry *entry;
469 	struct list_head *ptr, *storage;
470 
471 	mutex_lock(&priv->nfp_ipv4_off_lock);
472 	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
473 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
474 		if (entry->ipv4_addr == ipv4) {
475 			entry->ref_count--;
476 			if (!entry->ref_count) {
477 				list_del(&entry->list);
478 				kfree(entry);
479 			}
480 			break;
481 		}
482 	}
483 	mutex_unlock(&priv->nfp_ipv4_off_lock);
484 
485 	nfp_tun_write_ipv4_list(app);
486 }
487 
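/* Send all queued MAC/index pairs to the firmware in one TUN_MAC message.
 * On success the pending list is flushed; on failure it is kept so a later
 * call can retry.
 */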
488 void nfp_tunnel_write_macs(struct nfp_app *app)
489 {
490 	struct nfp_flower_priv *priv = app->priv;
491 	struct nfp_tun_mac_offload_entry *entry;
492 	struct nfp_tun_mac_addr *payload;
493 	struct list_head *ptr, *storage;
494 	int mac_count, err, pay_size;
495 
496 	mutex_lock(&priv->nfp_mac_off_lock);
497 	if (!priv->nfp_mac_off_count) {
498 		mutex_unlock(&priv->nfp_mac_off_lock);
499 		return;
500 	}
501 
502 	pay_size = sizeof(struct nfp_tun_mac_addr) +
503 		   sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;
504 
505 	payload = kzalloc(pay_size, GFP_KERNEL);
506 	if (!payload) {
507 		mutex_unlock(&priv->nfp_mac_off_lock);
508 		return;
509 	}
510 
511 	payload->count = cpu_to_be16(priv->nfp_mac_off_count);
512 
513 	mac_count = 0;
514 	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
515 		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
516 				   list);
517 		payload->addresses[mac_count].index = entry->index;
518 		ether_addr_copy(payload->addresses[mac_count].addr,
519 				entry->addr);
520 		mac_count++;
521 	}
522 
523 	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
524 				       pay_size, payload, GFP_KERNEL);
525 
526 	kfree(payload);
527 
528 	if (err) {
529 		mutex_unlock(&priv->nfp_mac_off_lock);
530 		/* Write failed so retain list for future retry. */
531 		return;
532 	}
533 
534 	/* If list was successfully offloaded, flush it. */
535 	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
536 		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
537 				   list);
538 		list_del(&entry->list);
539 		kfree(entry);
540 	}
541 
542 	priv->nfp_mac_off_count = 0;
543 	mutex_unlock(&priv->nfp_mac_off_lock);
544 }
545 
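/* Return the 8-bit NFP MAC index for a non-NFP netdev, allocating one from
 * the IDA on first use and reusing the existing entry for a known ifindex.
 */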
546 static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
547 {
548 	struct nfp_flower_priv *priv = app->priv;
549 	struct nfp_tun_mac_non_nfp_idx *entry;
550 	struct list_head *ptr, *storage;
551 	int idx;
552 
553 	mutex_lock(&priv->nfp_mac_index_lock);
554 	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
555 		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
556 		if (entry->ifindex == ifindex) {
557 			idx = entry->index;
558 			mutex_unlock(&priv->nfp_mac_index_lock);
559 			return idx;
560 		}
561 	}
562 
563 	idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
564 			     NFP_MAX_MAC_INDEX, GFP_KERNEL);
565 	if (idx < 0) {
566 		mutex_unlock(&priv->nfp_mac_index_lock);
567 		return idx;
568 	}
569 
570 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
571 	if (!entry) {
572 		mutex_unlock(&priv->nfp_mac_index_lock);
573 		return -ENOMEM;
574 	}
575 	entry->ifindex = ifindex;
576 	entry->index = idx;
577 	list_add_tail(&entry->list, &priv->nfp_mac_index_list);
578 	mutex_unlock(&priv->nfp_mac_index_lock);
579 
580 	return idx;
581 }
582 
583 static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
584 {
585 	struct nfp_flower_priv *priv = app->priv;
586 	struct nfp_tun_mac_non_nfp_idx *entry;
587 	struct list_head *ptr, *storage;
588 
589 	mutex_lock(&priv->nfp_mac_index_lock);
590 	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
591 		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
592 		if (entry->ifindex == ifindex) {
593 			ida_simple_remove(&priv->nfp_mac_off_ids,
594 					  entry->index);
595 			list_del(&entry->list);
596 			kfree(entry);
597 			break;
598 		}
599 	}
600 	mutex_unlock(&priv->nfp_mac_index_lock);
601 }
602 
603 static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
604 					    struct nfp_app *app)
605 {
606 	struct nfp_flower_priv *priv = app->priv;
607 	struct nfp_tun_mac_offload_entry *entry;
608 	u16 nfp_mac_idx;
609 	int port = 0;
610 
611 	/* Check if MAC should be offloaded. */
612 	if (!is_valid_ether_addr(netdev->dev_addr))
613 		return;
614 
615 	if (nfp_netdev_is_nfp_repr(netdev))
616 		port = nfp_repr_get_port_id(netdev);
617 	else if (!nfp_tun_is_netdev_to_offload(netdev))
618 		return;
619 
620 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
621 	if (!entry) {
622 		nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
623 		return;
624 	}
625 
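	/* Build the 16-bit MAC index: the lower byte holds the port type and
	 * the upper byte holds the owner (physical port number, PCIe vNIC, or
	 * an id allocated below for other netdevs).
	 */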
626 	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
627 	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
628 		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
629 	} else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
630 		   NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
631 		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
632 		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
633 	} else {
634 		/* Must assign our own unique 8-bit index. */
635 		int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
636 
637 		if (idx < 0) {
638 			nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
639 			kfree(entry);
640 			return;
641 		}
642 		nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
643 	}
644 
645 	entry->index = cpu_to_be16(nfp_mac_idx);
646 	ether_addr_copy(entry->addr, netdev->dev_addr);
647 
648 	mutex_lock(&priv->nfp_mac_off_lock);
649 	priv->nfp_mac_off_count++;
650 	list_add_tail(&entry->list, &priv->nfp_mac_off_list);
651 	mutex_unlock(&priv->nfp_mac_off_lock);
652 }
653 
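/* Netdevice notifier: queue a MAC for offload when an interface registers,
 * comes up or changes address, and release a non-NFP MAC index when it goes
 * down or unregisters.
 */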
654 static int nfp_tun_mac_event_handler(struct notifier_block *nb,
655 				     unsigned long event, void *ptr)
656 {
657 	struct nfp_flower_priv *app_priv;
658 	struct net_device *netdev;
659 	struct nfp_app *app;
660 
661 	if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
662 		app_priv = container_of(nb, struct nfp_flower_priv,
663 					nfp_tun_mac_nb);
664 		app = app_priv->app;
665 		netdev = netdev_notifier_info_to_dev(ptr);
666 
667 		/* If non-nfp netdev then free its offload index. */
668 		if (nfp_tun_is_netdev_to_offload(netdev))
669 			nfp_tun_del_mac_idx(app, netdev->ifindex);
670 	} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
671 		   event == NETDEV_REGISTER) {
672 		app_priv = container_of(nb, struct nfp_flower_priv,
673 					nfp_tun_mac_nb);
674 		app = app_priv->app;
675 		netdev = netdev_notifier_info_to_dev(ptr);
676 
677 		nfp_tun_add_to_mac_offload_list(netdev, app);
678 
679 		/* Force a list write to keep NFP up to date. */
680 		nfp_tunnel_write_macs(app);
681 	}
682 	return NOTIFY_OK;
683 }
684 
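/* One-time setup for the flower app: initialise the MAC, IPv4 and neighbour
 * offload caches, register the netdevice and netevent notifiers, then walk
 * the netdevs already present in init_net under RTNL to queue their MACs.
 */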
685 int nfp_tunnel_config_start(struct nfp_app *app)
686 {
687 	struct nfp_flower_priv *priv = app->priv;
688 	struct net_device *netdev;
689 	int err;
690 
691 	/* Initialise priv data for MAC offloading. */
692 	priv->nfp_mac_off_count = 0;
693 	mutex_init(&priv->nfp_mac_off_lock);
694 	INIT_LIST_HEAD(&priv->nfp_mac_off_list);
695 	priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
696 	mutex_init(&priv->nfp_mac_index_lock);
697 	INIT_LIST_HEAD(&priv->nfp_mac_index_list);
698 	ida_init(&priv->nfp_mac_off_ids);
699 
700 	/* Initialise priv data for IPv4 offloading. */
701 	mutex_init(&priv->nfp_ipv4_off_lock);
702 	INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
703 
704 	/* Initialise priv data for neighbour offloading. */
705 	spin_lock_init(&priv->nfp_neigh_off_lock);
706 	INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
707 	priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
708 
709 	err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
710 	if (err)
711 		goto err_free_mac_ida;
712 
713 	err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
714 	if (err)
715 		goto err_unreg_mac_nb;
716 
717 	/* Parse netdevs already registered for MACs that need to be offloaded. */
718 	rtnl_lock();
719 	for_each_netdev(&init_net, netdev)
720 		nfp_tun_add_to_mac_offload_list(netdev, app);
721 	rtnl_unlock();
722 
723 	return 0;
724 
725 err_unreg_mac_nb:
726 	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
727 err_free_mac_ida:
728 	ida_destroy(&priv->nfp_mac_off_ids);
729 	return err;
730 }
731 
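/* Teardown: unregister the notifiers and free every cached MAC, MAC index,
 * IPv4 address and route entry. Nothing further is sent to the firmware.
 */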
732 void nfp_tunnel_config_stop(struct nfp_app *app)
733 {
734 	struct nfp_tun_mac_offload_entry *mac_entry;
735 	struct nfp_flower_priv *priv = app->priv;
736 	struct nfp_ipv4_route_entry *route_entry;
737 	struct nfp_tun_mac_non_nfp_idx *mac_idx;
738 	struct nfp_ipv4_addr_entry *ip_entry;
739 	struct list_head *ptr, *storage;
740 
741 	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
742 	unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
743 
744 	/* Free any memory that may be occupied by MAC list. */
745 	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
746 		mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
747 				       list);
748 		list_del(&mac_entry->list);
749 		kfree(mac_entry);
750 	}
751 
752 	/* Free any memory that may be occupied by MAC index list. */
753 	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
754 		mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
755 				     list);
756 		list_del(&mac_idx->list);
757 		kfree(mac_idx);
758 	}
759 
760 	ida_destroy(&priv->nfp_mac_off_ids);
761 
762 	/* Free any memory that may be occupied by ipv4 list. */
763 	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
764 		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
765 		list_del(&ip_entry->list);
766 		kfree(ip_entry);
767 	}
768 
769 	/* Free any memory that may be occupied by the route list. */
770 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
771 		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
772 					 list);
773 		list_del(&route_entry->list);
774 		kfree(route_entry);
775 	}
776 }
777