xref: /linux/net/openvswitch/datapath.c (revision 841b86f3289dbe858daeceec36423d4ea286fac2)
1 /*
2  * Copyright (c) 2007-2014 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18 
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/ethtool.h>
40 #include <linux/wait.h>
41 #include <asm/div64.h>
42 #include <linux/highmem.h>
43 #include <linux/netfilter_bridge.h>
44 #include <linux/netfilter_ipv4.h>
45 #include <linux/inetdevice.h>
46 #include <linux/list.h>
47 #include <linux/openvswitch.h>
48 #include <linux/rculist.h>
49 #include <linux/dmi.h>
50 #include <net/genetlink.h>
51 #include <net/net_namespace.h>
52 #include <net/netns/generic.h>
53 
54 #include "datapath.h"
55 #include "flow.h"
56 #include "flow_table.h"
57 #include "flow_netlink.h"
58 #include "meter.h"
59 #include "vport-internal_dev.h"
60 #include "vport-netdev.h"
61 
62 unsigned int ovs_net_id __read_mostly;
63 
64 static struct genl_family dp_packet_genl_family;
65 static struct genl_family dp_flow_genl_family;
66 static struct genl_family dp_datapath_genl_family;
67 
68 static const struct nla_policy flow_policy[];
69 
70 static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
71 	.name = OVS_FLOW_MCGROUP,
72 };
73 
74 static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
75 	.name = OVS_DATAPATH_MCGROUP,
76 };
77 
78 static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
79 	.name = OVS_VPORT_MCGROUP,
80 };
81 
82 /* Check if need to build a reply message.
83  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
84 static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
85 			    unsigned int group)
86 {
87 	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
88 	       genl_has_listeners(family, genl_info_net(info), group);
89 }
90 
91 static void ovs_notify(struct genl_family *family,
92 		       struct sk_buff *skb, struct genl_info *info)
93 {
94 	genl_notify(family, skb, info, 0, GFP_KERNEL);
95 }
96 
97 /**
98  * DOC: Locking:
99  *
100  * All writes e.g. Writes to device state (add/remove datapath, port, set
101  * operations on vports, etc.), Writes to other state (flow table
102  * modifications, set miscellaneous datapath parameters, etc.) are protected
103  * by ovs_lock.
104  *
105  * Reads are protected by RCU.
106  *
107  * There are a few special cases (mostly stats) that have their own
108  * synchronization but they nest under all of above and don't interact with
109  * each other.
110  *
111  * The RTNL lock nests inside ovs_mutex.
112  */
113 
114 static DEFINE_MUTEX(ovs_mutex);
115 
116 void ovs_lock(void)
117 {
118 	mutex_lock(&ovs_mutex);
119 }
120 
121 void ovs_unlock(void)
122 {
123 	mutex_unlock(&ovs_mutex);
124 }
125 
126 #ifdef CONFIG_LOCKDEP
127 int lockdep_ovsl_is_held(void)
128 {
129 	if (debug_locks)
130 		return lockdep_is_held(&ovs_mutex);
131 	else
132 		return 1;
133 }
134 #endif
135 
136 static struct vport *new_vport(const struct vport_parms *);
137 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
138 			     const struct sw_flow_key *,
139 			     const struct dp_upcall_info *,
140 			     uint32_t cutlen);
141 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
142 				  const struct sw_flow_key *,
143 				  const struct dp_upcall_info *,
144 				  uint32_t cutlen);
145 
146 /* Must be called with rcu_read_lock or ovs_mutex. */
147 const char *ovs_dp_name(const struct datapath *dp)
148 {
149 	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
150 	return ovs_vport_name(vport);
151 }
152 
153 static int get_dpifindex(const struct datapath *dp)
154 {
155 	struct vport *local;
156 	int ifindex;
157 
158 	rcu_read_lock();
159 
160 	local = ovs_vport_rcu(dp, OVSP_LOCAL);
161 	if (local)
162 		ifindex = local->dev->ifindex;
163 	else
164 		ifindex = 0;
165 
166 	rcu_read_unlock();
167 
168 	return ifindex;
169 }
170 
171 static void destroy_dp_rcu(struct rcu_head *rcu)
172 {
173 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
174 
175 	ovs_flow_tbl_destroy(&dp->table);
176 	free_percpu(dp->stats_percpu);
177 	kfree(dp->ports);
178 	ovs_meters_exit(dp);
179 	kfree(dp);
180 }
181 
182 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
183 					    u16 port_no)
184 {
185 	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
186 }
187 
188 /* Called with ovs_mutex or RCU read lock. */
189 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
190 {
191 	struct vport *vport;
192 	struct hlist_head *head;
193 
194 	head = vport_hash_bucket(dp, port_no);
195 	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
196 		if (vport->port_no == port_no)
197 			return vport;
198 	}
199 	return NULL;
200 }
201 
202 /* Called with ovs_mutex. */
203 static struct vport *new_vport(const struct vport_parms *parms)
204 {
205 	struct vport *vport;
206 
207 	vport = ovs_vport_add(parms);
208 	if (!IS_ERR(vport)) {
209 		struct datapath *dp = parms->dp;
210 		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
211 
212 		hlist_add_head_rcu(&vport->dp_hash_node, head);
213 	}
214 	return vport;
215 }
216 
217 void ovs_dp_detach_port(struct vport *p)
218 {
219 	ASSERT_OVSL();
220 
221 	/* First drop references to device. */
222 	hlist_del_rcu(&p->dp_hash_node);
223 
224 	/* Then destroy it. */
225 	ovs_vport_del(p);
226 }
227 
228 /* Must be called with rcu_read_lock. */
229 void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
230 {
231 	const struct vport *p = OVS_CB(skb)->input_vport;
232 	struct datapath *dp = p->dp;
233 	struct sw_flow *flow;
234 	struct sw_flow_actions *sf_acts;
235 	struct dp_stats_percpu *stats;
236 	u64 *stats_counter;
237 	u32 n_mask_hit;
238 
239 	stats = this_cpu_ptr(dp->stats_percpu);
240 
241 	/* Look up flow. */
242 	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
243 	if (unlikely(!flow)) {
244 		struct dp_upcall_info upcall;
245 		int error;
246 
247 		memset(&upcall, 0, sizeof(upcall));
248 		upcall.cmd = OVS_PACKET_CMD_MISS;
249 		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
250 		upcall.mru = OVS_CB(skb)->mru;
251 		error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
252 		if (unlikely(error))
253 			kfree_skb(skb);
254 		else
255 			consume_skb(skb);
256 		stats_counter = &stats->n_missed;
257 		goto out;
258 	}
259 
260 	ovs_flow_stats_update(flow, key->tp.flags, skb);
261 	sf_acts = rcu_dereference(flow->sf_acts);
262 	ovs_execute_actions(dp, skb, sf_acts, key);
263 
264 	stats_counter = &stats->n_hit;
265 
266 out:
267 	/* Update datapath statistics. */
268 	u64_stats_update_begin(&stats->syncp);
269 	(*stats_counter)++;
270 	stats->n_mask_hit += n_mask_hit;
271 	u64_stats_update_end(&stats->syncp);
272 }
273 
274 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
275 		  const struct sw_flow_key *key,
276 		  const struct dp_upcall_info *upcall_info,
277 		  uint32_t cutlen)
278 {
279 	struct dp_stats_percpu *stats;
280 	int err;
281 
282 	if (upcall_info->portid == 0) {
283 		err = -ENOTCONN;
284 		goto err;
285 	}
286 
287 	if (!skb_is_gso(skb))
288 		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
289 	else
290 		err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
291 	if (err)
292 		goto err;
293 
294 	return 0;
295 
296 err:
297 	stats = this_cpu_ptr(dp->stats_percpu);
298 
299 	u64_stats_update_begin(&stats->syncp);
300 	stats->n_lost++;
301 	u64_stats_update_end(&stats->syncp);
302 
303 	return err;
304 }
305 
306 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
307 			     const struct sw_flow_key *key,
308 			     const struct dp_upcall_info *upcall_info,
309 				 uint32_t cutlen)
310 {
311 	struct sk_buff *segs, *nskb;
312 	int err;
313 
314 	BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
315 	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
316 	if (IS_ERR(segs))
317 		return PTR_ERR(segs);
318 	if (segs == NULL)
319 		return -EINVAL;
320 
321 	/* Queue all of the segments. */
322 	skb = segs;
323 	do {
324 		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
325 		if (err)
326 			break;
327 
328 	} while ((skb = skb->next));
329 
330 	/* Free all of the segments. */
331 	skb = segs;
332 	do {
333 		nskb = skb->next;
334 		if (err)
335 			kfree_skb(skb);
336 		else
337 			consume_skb(skb);
338 	} while ((skb = nskb));
339 	return err;
340 }
341 
342 static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
343 			      unsigned int hdrlen, int actions_attrlen)
344 {
345 	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
346 		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
347 		+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
348 		+ nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */
349 
350 	/* OVS_PACKET_ATTR_USERDATA */
351 	if (upcall_info->userdata)
352 		size += NLA_ALIGN(upcall_info->userdata->nla_len);
353 
354 	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
355 	if (upcall_info->egress_tun_info)
356 		size += nla_total_size(ovs_tun_key_attr_size());
357 
358 	/* OVS_PACKET_ATTR_ACTIONS */
359 	if (upcall_info->actions_len)
360 		size += nla_total_size(actions_attrlen);
361 
362 	/* OVS_PACKET_ATTR_MRU */
363 	if (upcall_info->mru)
364 		size += nla_total_size(sizeof(upcall_info->mru));
365 
366 	return size;
367 }
368 
369 static void pad_packet(struct datapath *dp, struct sk_buff *skb)
370 {
371 	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
372 		size_t plen = NLA_ALIGN(skb->len) - skb->len;
373 
374 		if (plen > 0)
375 			skb_put_zero(skb, plen);
376 	}
377 }
378 
379 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
380 				  const struct sw_flow_key *key,
381 				  const struct dp_upcall_info *upcall_info,
382 				  uint32_t cutlen)
383 {
384 	struct ovs_header *upcall;
385 	struct sk_buff *nskb = NULL;
386 	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
387 	struct nlattr *nla;
388 	size_t len;
389 	unsigned int hlen;
390 	int err, dp_ifindex;
391 
392 	dp_ifindex = get_dpifindex(dp);
393 	if (!dp_ifindex)
394 		return -ENODEV;
395 
396 	if (skb_vlan_tag_present(skb)) {
397 		nskb = skb_clone(skb, GFP_ATOMIC);
398 		if (!nskb)
399 			return -ENOMEM;
400 
401 		nskb = __vlan_hwaccel_push_inside(nskb);
402 		if (!nskb)
403 			return -ENOMEM;
404 
405 		skb = nskb;
406 	}
407 
408 	if (nla_attr_size(skb->len) > USHRT_MAX) {
409 		err = -EFBIG;
410 		goto out;
411 	}
412 
413 	/* Complete checksum if needed */
414 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
415 	    (err = skb_csum_hwoffload_help(skb, 0)))
416 		goto out;
417 
418 	/* Older versions of OVS user space enforce alignment of the last
419 	 * Netlink attribute to NLA_ALIGNTO which would require extensive
420 	 * padding logic. Only perform zerocopy if padding is not required.
421 	 */
422 	if (dp->user_features & OVS_DP_F_UNALIGNED)
423 		hlen = skb_zerocopy_headlen(skb);
424 	else
425 		hlen = skb->len;
426 
427 	len = upcall_msg_size(upcall_info, hlen - cutlen,
428 			      OVS_CB(skb)->acts_origlen);
429 	user_skb = genlmsg_new(len, GFP_ATOMIC);
430 	if (!user_skb) {
431 		err = -ENOMEM;
432 		goto out;
433 	}
434 
435 	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
436 			     0, upcall_info->cmd);
437 	upcall->dp_ifindex = dp_ifindex;
438 
439 	err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
440 	BUG_ON(err);
441 
442 	if (upcall_info->userdata)
443 		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
444 			  nla_len(upcall_info->userdata),
445 			  nla_data(upcall_info->userdata));
446 
447 	if (upcall_info->egress_tun_info) {
448 		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
449 		err = ovs_nla_put_tunnel_info(user_skb,
450 					      upcall_info->egress_tun_info);
451 		BUG_ON(err);
452 		nla_nest_end(user_skb, nla);
453 	}
454 
455 	if (upcall_info->actions_len) {
456 		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
457 		err = ovs_nla_put_actions(upcall_info->actions,
458 					  upcall_info->actions_len,
459 					  user_skb);
460 		if (!err)
461 			nla_nest_end(user_skb, nla);
462 		else
463 			nla_nest_cancel(user_skb, nla);
464 	}
465 
466 	/* Add OVS_PACKET_ATTR_MRU */
467 	if (upcall_info->mru) {
468 		if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
469 				upcall_info->mru)) {
470 			err = -ENOBUFS;
471 			goto out;
472 		}
473 		pad_packet(dp, user_skb);
474 	}
475 
476 	/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
477 	if (cutlen > 0) {
478 		if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
479 				skb->len)) {
480 			err = -ENOBUFS;
481 			goto out;
482 		}
483 		pad_packet(dp, user_skb);
484 	}
485 
486 	/* Only reserve room for attribute header, packet data is added
487 	 * in skb_zerocopy() */
488 	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
489 		err = -ENOBUFS;
490 		goto out;
491 	}
492 	nla->nla_len = nla_attr_size(skb->len - cutlen);
493 
494 	err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
495 	if (err)
496 		goto out;
497 
498 	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
499 	pad_packet(dp, user_skb);
500 
501 	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
502 
503 	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
504 	user_skb = NULL;
505 out:
506 	if (err)
507 		skb_tx_error(skb);
508 	kfree_skb(user_skb);
509 	kfree_skb(nskb);
510 	return err;
511 }
512 
513 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
514 {
515 	struct ovs_header *ovs_header = info->userhdr;
516 	struct net *net = sock_net(skb->sk);
517 	struct nlattr **a = info->attrs;
518 	struct sw_flow_actions *acts;
519 	struct sk_buff *packet;
520 	struct sw_flow *flow;
521 	struct sw_flow_actions *sf_acts;
522 	struct datapath *dp;
523 	struct vport *input_vport;
524 	u16 mru = 0;
525 	int len;
526 	int err;
527 	bool log = !a[OVS_PACKET_ATTR_PROBE];
528 
529 	err = -EINVAL;
530 	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
531 	    !a[OVS_PACKET_ATTR_ACTIONS])
532 		goto err;
533 
534 	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
535 	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
536 	err = -ENOMEM;
537 	if (!packet)
538 		goto err;
539 	skb_reserve(packet, NET_IP_ALIGN);
540 
541 	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
542 
543 	/* Set packet's mru */
544 	if (a[OVS_PACKET_ATTR_MRU]) {
545 		mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
546 		packet->ignore_df = 1;
547 	}
548 	OVS_CB(packet)->mru = mru;
549 
550 	/* Build an sw_flow for sending this packet. */
551 	flow = ovs_flow_alloc();
552 	err = PTR_ERR(flow);
553 	if (IS_ERR(flow))
554 		goto err_kfree_skb;
555 
556 	err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
557 					     packet, &flow->key, log);
558 	if (err)
559 		goto err_flow_free;
560 
561 	err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
562 				   &flow->key, &acts, log);
563 	if (err)
564 		goto err_flow_free;
565 
566 	rcu_assign_pointer(flow->sf_acts, acts);
567 	packet->priority = flow->key.phy.priority;
568 	packet->mark = flow->key.phy.skb_mark;
569 
570 	rcu_read_lock();
571 	dp = get_dp_rcu(net, ovs_header->dp_ifindex);
572 	err = -ENODEV;
573 	if (!dp)
574 		goto err_unlock;
575 
576 	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
577 	if (!input_vport)
578 		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
579 
580 	if (!input_vport)
581 		goto err_unlock;
582 
583 	packet->dev = input_vport->dev;
584 	OVS_CB(packet)->input_vport = input_vport;
585 	sf_acts = rcu_dereference(flow->sf_acts);
586 
587 	local_bh_disable();
588 	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
589 	local_bh_enable();
590 	rcu_read_unlock();
591 
592 	ovs_flow_free(flow, false);
593 	return err;
594 
595 err_unlock:
596 	rcu_read_unlock();
597 err_flow_free:
598 	ovs_flow_free(flow, false);
599 err_kfree_skb:
600 	kfree_skb(packet);
601 err:
602 	return err;
603 }
604 
605 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
606 	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
607 	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
608 	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
609 	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
610 	[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
611 };
612 
613 static const struct genl_ops dp_packet_genl_ops[] = {
614 	{ .cmd = OVS_PACKET_CMD_EXECUTE,
615 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
616 	  .policy = packet_policy,
617 	  .doit = ovs_packet_cmd_execute
618 	}
619 };
620 
621 static struct genl_family dp_packet_genl_family __ro_after_init = {
622 	.hdrsize = sizeof(struct ovs_header),
623 	.name = OVS_PACKET_FAMILY,
624 	.version = OVS_PACKET_VERSION,
625 	.maxattr = OVS_PACKET_ATTR_MAX,
626 	.netnsok = true,
627 	.parallel_ops = true,
628 	.ops = dp_packet_genl_ops,
629 	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
630 	.module = THIS_MODULE,
631 };
632 
633 static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
634 			 struct ovs_dp_megaflow_stats *mega_stats)
635 {
636 	int i;
637 
638 	memset(mega_stats, 0, sizeof(*mega_stats));
639 
640 	stats->n_flows = ovs_flow_tbl_count(&dp->table);
641 	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
642 
643 	stats->n_hit = stats->n_missed = stats->n_lost = 0;
644 
645 	for_each_possible_cpu(i) {
646 		const struct dp_stats_percpu *percpu_stats;
647 		struct dp_stats_percpu local_stats;
648 		unsigned int start;
649 
650 		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
651 
652 		do {
653 			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
654 			local_stats = *percpu_stats;
655 		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
656 
657 		stats->n_hit += local_stats.n_hit;
658 		stats->n_missed += local_stats.n_missed;
659 		stats->n_lost += local_stats.n_lost;
660 		mega_stats->n_mask_hit += local_stats.n_mask_hit;
661 	}
662 }
663 
664 static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
665 {
666 	return ovs_identifier_is_ufid(sfid) &&
667 	       !(ufid_flags & OVS_UFID_F_OMIT_KEY);
668 }
669 
670 static bool should_fill_mask(uint32_t ufid_flags)
671 {
672 	return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
673 }
674 
675 static bool should_fill_actions(uint32_t ufid_flags)
676 {
677 	return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
678 }
679 
680 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
681 				    const struct sw_flow_id *sfid,
682 				    uint32_t ufid_flags)
683 {
684 	size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
685 
686 	/* OVS_FLOW_ATTR_UFID */
687 	if (sfid && ovs_identifier_is_ufid(sfid))
688 		len += nla_total_size(sfid->ufid_len);
689 
690 	/* OVS_FLOW_ATTR_KEY */
691 	if (!sfid || should_fill_key(sfid, ufid_flags))
692 		len += nla_total_size(ovs_key_attr_size());
693 
694 	/* OVS_FLOW_ATTR_MASK */
695 	if (should_fill_mask(ufid_flags))
696 		len += nla_total_size(ovs_key_attr_size());
697 
698 	/* OVS_FLOW_ATTR_ACTIONS */
699 	if (should_fill_actions(ufid_flags))
700 		len += nla_total_size(acts->orig_len);
701 
702 	return len
703 		+ nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
704 		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
705 		+ nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
706 }
707 
708 /* Called with ovs_mutex or RCU read lock. */
709 static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
710 				   struct sk_buff *skb)
711 {
712 	struct ovs_flow_stats stats;
713 	__be16 tcp_flags;
714 	unsigned long used;
715 
716 	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
717 
718 	if (used &&
719 	    nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
720 			      OVS_FLOW_ATTR_PAD))
721 		return -EMSGSIZE;
722 
723 	if (stats.n_packets &&
724 	    nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
725 			  sizeof(struct ovs_flow_stats), &stats,
726 			  OVS_FLOW_ATTR_PAD))
727 		return -EMSGSIZE;
728 
729 	if ((u8)ntohs(tcp_flags) &&
730 	     nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
731 		return -EMSGSIZE;
732 
733 	return 0;
734 }
735 
736 /* Called with ovs_mutex or RCU read lock. */
737 static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
738 				     struct sk_buff *skb, int skb_orig_len)
739 {
740 	struct nlattr *start;
741 	int err;
742 
743 	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
744 	 * this is the first flow to be dumped into 'skb'.  This is unusual for
745 	 * Netlink but individual action lists can be longer than
746 	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
747 	 * The userspace caller can always fetch the actions separately if it
748 	 * really wants them.  (Most userspace callers in fact don't care.)
749 	 *
750 	 * This can only fail for dump operations because the skb is always
751 	 * properly sized for single flows.
752 	 */
753 	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
754 	if (start) {
755 		const struct sw_flow_actions *sf_acts;
756 
757 		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
758 		err = ovs_nla_put_actions(sf_acts->actions,
759 					  sf_acts->actions_len, skb);
760 
761 		if (!err)
762 			nla_nest_end(skb, start);
763 		else {
764 			if (skb_orig_len)
765 				return err;
766 
767 			nla_nest_cancel(skb, start);
768 		}
769 	} else if (skb_orig_len) {
770 		return -EMSGSIZE;
771 	}
772 
773 	return 0;
774 }
775 
776 /* Called with ovs_mutex or RCU read lock. */
777 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
778 				  struct sk_buff *skb, u32 portid,
779 				  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
780 {
781 	const int skb_orig_len = skb->len;
782 	struct ovs_header *ovs_header;
783 	int err;
784 
785 	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
786 				 flags, cmd);
787 	if (!ovs_header)
788 		return -EMSGSIZE;
789 
790 	ovs_header->dp_ifindex = dp_ifindex;
791 
792 	err = ovs_nla_put_identifier(flow, skb);
793 	if (err)
794 		goto error;
795 
796 	if (should_fill_key(&flow->id, ufid_flags)) {
797 		err = ovs_nla_put_masked_key(flow, skb);
798 		if (err)
799 			goto error;
800 	}
801 
802 	if (should_fill_mask(ufid_flags)) {
803 		err = ovs_nla_put_mask(flow, skb);
804 		if (err)
805 			goto error;
806 	}
807 
808 	err = ovs_flow_cmd_fill_stats(flow, skb);
809 	if (err)
810 		goto error;
811 
812 	if (should_fill_actions(ufid_flags)) {
813 		err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
814 		if (err)
815 			goto error;
816 	}
817 
818 	genlmsg_end(skb, ovs_header);
819 	return 0;
820 
821 error:
822 	genlmsg_cancel(skb, ovs_header);
823 	return err;
824 }
825 
826 /* May not be called with RCU read lock. */
827 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
828 					       const struct sw_flow_id *sfid,
829 					       struct genl_info *info,
830 					       bool always,
831 					       uint32_t ufid_flags)
832 {
833 	struct sk_buff *skb;
834 	size_t len;
835 
836 	if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
837 		return NULL;
838 
839 	len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
840 	skb = genlmsg_new(len, GFP_KERNEL);
841 	if (!skb)
842 		return ERR_PTR(-ENOMEM);
843 
844 	return skb;
845 }
846 
847 /* Called with ovs_mutex. */
848 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
849 					       int dp_ifindex,
850 					       struct genl_info *info, u8 cmd,
851 					       bool always, u32 ufid_flags)
852 {
853 	struct sk_buff *skb;
854 	int retval;
855 
856 	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
857 				      &flow->id, info, always, ufid_flags);
858 	if (IS_ERR_OR_NULL(skb))
859 		return skb;
860 
861 	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
862 					info->snd_portid, info->snd_seq, 0,
863 					cmd, ufid_flags);
864 	BUG_ON(retval < 0);
865 	return skb;
866 }
867 
868 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
869 {
870 	struct net *net = sock_net(skb->sk);
871 	struct nlattr **a = info->attrs;
872 	struct ovs_header *ovs_header = info->userhdr;
873 	struct sw_flow *flow = NULL, *new_flow;
874 	struct sw_flow_mask mask;
875 	struct sk_buff *reply;
876 	struct datapath *dp;
877 	struct sw_flow_actions *acts;
878 	struct sw_flow_match match;
879 	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
880 	int error;
881 	bool log = !a[OVS_FLOW_ATTR_PROBE];
882 
883 	/* Must have key and actions. */
884 	error = -EINVAL;
885 	if (!a[OVS_FLOW_ATTR_KEY]) {
886 		OVS_NLERR(log, "Flow key attr not present in new flow.");
887 		goto error;
888 	}
889 	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
890 		OVS_NLERR(log, "Flow actions attr not present in new flow.");
891 		goto error;
892 	}
893 
894 	/* Most of the time we need to allocate a new flow, do it before
895 	 * locking.
896 	 */
897 	new_flow = ovs_flow_alloc();
898 	if (IS_ERR(new_flow)) {
899 		error = PTR_ERR(new_flow);
900 		goto error;
901 	}
902 
903 	/* Extract key. */
904 	ovs_match_init(&match, &new_flow->key, false, &mask);
905 	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
906 				  a[OVS_FLOW_ATTR_MASK], log);
907 	if (error)
908 		goto err_kfree_flow;
909 
910 	/* Extract flow identifier. */
911 	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
912 				       &new_flow->key, log);
913 	if (error)
914 		goto err_kfree_flow;
915 
916 	/* unmasked key is needed to match when ufid is not used. */
917 	if (ovs_identifier_is_key(&new_flow->id))
918 		match.key = new_flow->id.unmasked_key;
919 
920 	ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
921 
922 	/* Validate actions. */
923 	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
924 				     &new_flow->key, &acts, log);
925 	if (error) {
926 		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
927 		goto err_kfree_flow;
928 	}
929 
930 	reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
931 					ufid_flags);
932 	if (IS_ERR(reply)) {
933 		error = PTR_ERR(reply);
934 		goto err_kfree_acts;
935 	}
936 
937 	ovs_lock();
938 	dp = get_dp(net, ovs_header->dp_ifindex);
939 	if (unlikely(!dp)) {
940 		error = -ENODEV;
941 		goto err_unlock_ovs;
942 	}
943 
944 	/* Check if this is a duplicate flow */
945 	if (ovs_identifier_is_ufid(&new_flow->id))
946 		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
947 	if (!flow)
948 		flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
949 	if (likely(!flow)) {
950 		rcu_assign_pointer(new_flow->sf_acts, acts);
951 
952 		/* Put flow in bucket. */
953 		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
954 		if (unlikely(error)) {
955 			acts = NULL;
956 			goto err_unlock_ovs;
957 		}
958 
959 		if (unlikely(reply)) {
960 			error = ovs_flow_cmd_fill_info(new_flow,
961 						       ovs_header->dp_ifindex,
962 						       reply, info->snd_portid,
963 						       info->snd_seq, 0,
964 						       OVS_FLOW_CMD_NEW,
965 						       ufid_flags);
966 			BUG_ON(error < 0);
967 		}
968 		ovs_unlock();
969 	} else {
970 		struct sw_flow_actions *old_acts;
971 
972 		/* Bail out if we're not allowed to modify an existing flow.
973 		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
974 		 * because Generic Netlink treats the latter as a dump
975 		 * request.  We also accept NLM_F_EXCL in case that bug ever
976 		 * gets fixed.
977 		 */
978 		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
979 							 | NLM_F_EXCL))) {
980 			error = -EEXIST;
981 			goto err_unlock_ovs;
982 		}
983 		/* The flow identifier has to be the same for flow updates.
984 		 * Look for any overlapping flow.
985 		 */
986 		if (unlikely(!ovs_flow_cmp(flow, &match))) {
987 			if (ovs_identifier_is_key(&flow->id))
988 				flow = ovs_flow_tbl_lookup_exact(&dp->table,
989 								 &match);
990 			else /* UFID matches but key is different */
991 				flow = NULL;
992 			if (!flow) {
993 				error = -ENOENT;
994 				goto err_unlock_ovs;
995 			}
996 		}
997 		/* Update actions. */
998 		old_acts = ovsl_dereference(flow->sf_acts);
999 		rcu_assign_pointer(flow->sf_acts, acts);
1000 
1001 		if (unlikely(reply)) {
1002 			error = ovs_flow_cmd_fill_info(flow,
1003 						       ovs_header->dp_ifindex,
1004 						       reply, info->snd_portid,
1005 						       info->snd_seq, 0,
1006 						       OVS_FLOW_CMD_NEW,
1007 						       ufid_flags);
1008 			BUG_ON(error < 0);
1009 		}
1010 		ovs_unlock();
1011 
1012 		ovs_nla_free_flow_actions_rcu(old_acts);
1013 		ovs_flow_free(new_flow, false);
1014 	}
1015 
1016 	if (reply)
1017 		ovs_notify(&dp_flow_genl_family, reply, info);
1018 	return 0;
1019 
1020 err_unlock_ovs:
1021 	ovs_unlock();
1022 	kfree_skb(reply);
1023 err_kfree_acts:
1024 	ovs_nla_free_flow_actions(acts);
1025 err_kfree_flow:
1026 	ovs_flow_free(new_flow, false);
1027 error:
1028 	return error;
1029 }
1030 
1031 /* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
1032 static struct sw_flow_actions *get_flow_actions(struct net *net,
1033 						const struct nlattr *a,
1034 						const struct sw_flow_key *key,
1035 						const struct sw_flow_mask *mask,
1036 						bool log)
1037 {
1038 	struct sw_flow_actions *acts;
1039 	struct sw_flow_key masked_key;
1040 	int error;
1041 
1042 	ovs_flow_mask_key(&masked_key, key, true, mask);
1043 	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
1044 	if (error) {
1045 		OVS_NLERR(log,
1046 			  "Actions may not be safe on all matching packets");
1047 		return ERR_PTR(error);
1048 	}
1049 
1050 	return acts;
1051 }
1052 
1053 /* Factor out match-init and action-copy to avoid
1054  * "Wframe-larger-than=1024" warning. Because mask is only
1055  * used to get actions, we new a function to save some
1056  * stack space.
1057  *
1058  * If there are not key and action attrs, we return 0
1059  * directly. In the case, the caller will also not use the
1060  * match as before. If there is action attr, we try to get
1061  * actions and save them to *acts. Before returning from
1062  * the function, we reset the match->mask pointer. Because
1063  * we should not to return match object with dangling reference
1064  * to mask.
1065  * */
1066 static int ovs_nla_init_match_and_action(struct net *net,
1067 					 struct sw_flow_match *match,
1068 					 struct sw_flow_key *key,
1069 					 struct nlattr **a,
1070 					 struct sw_flow_actions **acts,
1071 					 bool log)
1072 {
1073 	struct sw_flow_mask mask;
1074 	int error = 0;
1075 
1076 	if (a[OVS_FLOW_ATTR_KEY]) {
1077 		ovs_match_init(match, key, true, &mask);
1078 		error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
1079 					  a[OVS_FLOW_ATTR_MASK], log);
1080 		if (error)
1081 			goto error;
1082 	}
1083 
1084 	if (a[OVS_FLOW_ATTR_ACTIONS]) {
1085 		if (!a[OVS_FLOW_ATTR_KEY]) {
1086 			OVS_NLERR(log,
1087 				  "Flow key attribute not present in set flow.");
1088 			error = -EINVAL;
1089 			goto error;
1090 		}
1091 
1092 		*acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
1093 					 &mask, log);
1094 		if (IS_ERR(*acts)) {
1095 			error = PTR_ERR(*acts);
1096 			goto error;
1097 		}
1098 	}
1099 
1100 	/* On success, error is 0. */
1101 error:
1102 	match->mask = NULL;
1103 	return error;
1104 }
1105 
1106 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1107 {
1108 	struct net *net = sock_net(skb->sk);
1109 	struct nlattr **a = info->attrs;
1110 	struct ovs_header *ovs_header = info->userhdr;
1111 	struct sw_flow_key key;
1112 	struct sw_flow *flow;
1113 	struct sk_buff *reply = NULL;
1114 	struct datapath *dp;
1115 	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
1116 	struct sw_flow_match match;
1117 	struct sw_flow_id sfid;
1118 	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1119 	int error = 0;
1120 	bool log = !a[OVS_FLOW_ATTR_PROBE];
1121 	bool ufid_present;
1122 
1123 	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
1124 	if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
1125 		OVS_NLERR(log,
1126 			  "Flow set message rejected, Key attribute missing.");
1127 		return -EINVAL;
1128 	}
1129 
1130 	error = ovs_nla_init_match_and_action(net, &match, &key, a,
1131 					      &acts, log);
1132 	if (error)
1133 		goto error;
1134 
1135 	if (acts) {
1136 		/* Can allocate before locking if have acts. */
1137 		reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1138 						ufid_flags);
1139 		if (IS_ERR(reply)) {
1140 			error = PTR_ERR(reply);
1141 			goto err_kfree_acts;
1142 		}
1143 	}
1144 
1145 	ovs_lock();
1146 	dp = get_dp(net, ovs_header->dp_ifindex);
1147 	if (unlikely(!dp)) {
1148 		error = -ENODEV;
1149 		goto err_unlock_ovs;
1150 	}
1151 	/* Check that the flow exists. */
1152 	if (ufid_present)
1153 		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1154 	else
1155 		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1156 	if (unlikely(!flow)) {
1157 		error = -ENOENT;
1158 		goto err_unlock_ovs;
1159 	}
1160 
1161 	/* Update actions, if present. */
1162 	if (likely(acts)) {
1163 		old_acts = ovsl_dereference(flow->sf_acts);
1164 		rcu_assign_pointer(flow->sf_acts, acts);
1165 
1166 		if (unlikely(reply)) {
1167 			error = ovs_flow_cmd_fill_info(flow,
1168 						       ovs_header->dp_ifindex,
1169 						       reply, info->snd_portid,
1170 						       info->snd_seq, 0,
1171 						       OVS_FLOW_CMD_NEW,
1172 						       ufid_flags);
1173 			BUG_ON(error < 0);
1174 		}
1175 	} else {
1176 		/* Could not alloc without acts before locking. */
1177 		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1178 						info, OVS_FLOW_CMD_NEW, false,
1179 						ufid_flags);
1180 
1181 		if (IS_ERR(reply)) {
1182 			error = PTR_ERR(reply);
1183 			goto err_unlock_ovs;
1184 		}
1185 	}
1186 
1187 	/* Clear stats. */
1188 	if (a[OVS_FLOW_ATTR_CLEAR])
1189 		ovs_flow_stats_clear(flow);
1190 	ovs_unlock();
1191 
1192 	if (reply)
1193 		ovs_notify(&dp_flow_genl_family, reply, info);
1194 	if (old_acts)
1195 		ovs_nla_free_flow_actions_rcu(old_acts);
1196 
1197 	return 0;
1198 
1199 err_unlock_ovs:
1200 	ovs_unlock();
1201 	kfree_skb(reply);
1202 err_kfree_acts:
1203 	ovs_nla_free_flow_actions(acts);
1204 error:
1205 	return error;
1206 }
1207 
1208 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1209 {
1210 	struct nlattr **a = info->attrs;
1211 	struct ovs_header *ovs_header = info->userhdr;
1212 	struct net *net = sock_net(skb->sk);
1213 	struct sw_flow_key key;
1214 	struct sk_buff *reply;
1215 	struct sw_flow *flow;
1216 	struct datapath *dp;
1217 	struct sw_flow_match match;
1218 	struct sw_flow_id ufid;
1219 	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1220 	int err = 0;
1221 	bool log = !a[OVS_FLOW_ATTR_PROBE];
1222 	bool ufid_present;
1223 
1224 	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1225 	if (a[OVS_FLOW_ATTR_KEY]) {
1226 		ovs_match_init(&match, &key, true, NULL);
1227 		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
1228 					log);
1229 	} else if (!ufid_present) {
1230 		OVS_NLERR(log,
1231 			  "Flow get message rejected, Key attribute missing.");
1232 		err = -EINVAL;
1233 	}
1234 	if (err)
1235 		return err;
1236 
1237 	ovs_lock();
1238 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1239 	if (!dp) {
1240 		err = -ENODEV;
1241 		goto unlock;
1242 	}
1243 
1244 	if (ufid_present)
1245 		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1246 	else
1247 		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1248 	if (!flow) {
1249 		err = -ENOENT;
1250 		goto unlock;
1251 	}
1252 
1253 	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1254 					OVS_FLOW_CMD_NEW, true, ufid_flags);
1255 	if (IS_ERR(reply)) {
1256 		err = PTR_ERR(reply);
1257 		goto unlock;
1258 	}
1259 
1260 	ovs_unlock();
1261 	return genlmsg_reply(reply, info);
1262 unlock:
1263 	ovs_unlock();
1264 	return err;
1265 }
1266 
1267 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1268 {
1269 	struct nlattr **a = info->attrs;
1270 	struct ovs_header *ovs_header = info->userhdr;
1271 	struct net *net = sock_net(skb->sk);
1272 	struct sw_flow_key key;
1273 	struct sk_buff *reply;
1274 	struct sw_flow *flow = NULL;
1275 	struct datapath *dp;
1276 	struct sw_flow_match match;
1277 	struct sw_flow_id ufid;
1278 	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1279 	int err;
1280 	bool log = !a[OVS_FLOW_ATTR_PROBE];
1281 	bool ufid_present;
1282 
1283 	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1284 	if (a[OVS_FLOW_ATTR_KEY]) {
1285 		ovs_match_init(&match, &key, true, NULL);
1286 		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1287 					NULL, log);
1288 		if (unlikely(err))
1289 			return err;
1290 	}
1291 
1292 	ovs_lock();
1293 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1294 	if (unlikely(!dp)) {
1295 		err = -ENODEV;
1296 		goto unlock;
1297 	}
1298 
1299 	if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
1300 		err = ovs_flow_tbl_flush(&dp->table);
1301 		goto unlock;
1302 	}
1303 
1304 	if (ufid_present)
1305 		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1306 	else
1307 		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1308 	if (unlikely(!flow)) {
1309 		err = -ENOENT;
1310 		goto unlock;
1311 	}
1312 
1313 	ovs_flow_tbl_remove(&dp->table, flow);
1314 	ovs_unlock();
1315 
1316 	reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
1317 					&flow->id, info, false, ufid_flags);
1318 	if (likely(reply)) {
1319 		if (likely(!IS_ERR(reply))) {
1320 			rcu_read_lock();	/*To keep RCU checker happy. */
1321 			err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1322 						     reply, info->snd_portid,
1323 						     info->snd_seq, 0,
1324 						     OVS_FLOW_CMD_DEL,
1325 						     ufid_flags);
1326 			rcu_read_unlock();
1327 			BUG_ON(err < 0);
1328 
1329 			ovs_notify(&dp_flow_genl_family, reply, info);
1330 		} else {
1331 			netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1332 		}
1333 	}
1334 
1335 	ovs_flow_free(flow, true);
1336 	return 0;
1337 unlock:
1338 	ovs_unlock();
1339 	return err;
1340 }
1341 
1342 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1343 {
1344 	struct nlattr *a[__OVS_FLOW_ATTR_MAX];
1345 	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1346 	struct table_instance *ti;
1347 	struct datapath *dp;
1348 	u32 ufid_flags;
1349 	int err;
1350 
1351 	err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
1352 			    OVS_FLOW_ATTR_MAX, flow_policy, NULL);
1353 	if (err)
1354 		return err;
1355 	ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1356 
1357 	rcu_read_lock();
1358 	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1359 	if (!dp) {
1360 		rcu_read_unlock();
1361 		return -ENODEV;
1362 	}
1363 
1364 	ti = rcu_dereference(dp->table.ti);
1365 	for (;;) {
1366 		struct sw_flow *flow;
1367 		u32 bucket, obj;
1368 
1369 		bucket = cb->args[0];
1370 		obj = cb->args[1];
1371 		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1372 		if (!flow)
1373 			break;
1374 
1375 		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1376 					   NETLINK_CB(cb->skb).portid,
1377 					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
1378 					   OVS_FLOW_CMD_NEW, ufid_flags) < 0)
1379 			break;
1380 
1381 		cb->args[0] = bucket;
1382 		cb->args[1] = obj;
1383 	}
1384 	rcu_read_unlock();
1385 	return skb->len;
1386 }
1387 
1388 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1389 	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1390 	[OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
1391 	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1392 	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1393 	[OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
1394 	[OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1395 	[OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
1396 };
1397 
1398 static const struct genl_ops dp_flow_genl_ops[] = {
1399 	{ .cmd = OVS_FLOW_CMD_NEW,
1400 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1401 	  .policy = flow_policy,
1402 	  .doit = ovs_flow_cmd_new
1403 	},
1404 	{ .cmd = OVS_FLOW_CMD_DEL,
1405 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1406 	  .policy = flow_policy,
1407 	  .doit = ovs_flow_cmd_del
1408 	},
1409 	{ .cmd = OVS_FLOW_CMD_GET,
1410 	  .flags = 0,		    /* OK for unprivileged users. */
1411 	  .policy = flow_policy,
1412 	  .doit = ovs_flow_cmd_get,
1413 	  .dumpit = ovs_flow_cmd_dump
1414 	},
1415 	{ .cmd = OVS_FLOW_CMD_SET,
1416 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1417 	  .policy = flow_policy,
1418 	  .doit = ovs_flow_cmd_set,
1419 	},
1420 };
1421 
1422 static struct genl_family dp_flow_genl_family __ro_after_init = {
1423 	.hdrsize = sizeof(struct ovs_header),
1424 	.name = OVS_FLOW_FAMILY,
1425 	.version = OVS_FLOW_VERSION,
1426 	.maxattr = OVS_FLOW_ATTR_MAX,
1427 	.netnsok = true,
1428 	.parallel_ops = true,
1429 	.ops = dp_flow_genl_ops,
1430 	.n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1431 	.mcgrps = &ovs_dp_flow_multicast_group,
1432 	.n_mcgrps = 1,
1433 	.module = THIS_MODULE,
1434 };
1435 
1436 static size_t ovs_dp_cmd_msg_size(void)
1437 {
1438 	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1439 
1440 	msgsize += nla_total_size(IFNAMSIZ);
1441 	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
1442 	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
1443 	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1444 
1445 	return msgsize;
1446 }
1447 
1448 /* Called with ovs_mutex. */
1449 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1450 				u32 portid, u32 seq, u32 flags, u8 cmd)
1451 {
1452 	struct ovs_header *ovs_header;
1453 	struct ovs_dp_stats dp_stats;
1454 	struct ovs_dp_megaflow_stats dp_megaflow_stats;
1455 	int err;
1456 
1457 	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1458 				   flags, cmd);
1459 	if (!ovs_header)
1460 		goto error;
1461 
1462 	ovs_header->dp_ifindex = get_dpifindex(dp);
1463 
1464 	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1465 	if (err)
1466 		goto nla_put_failure;
1467 
1468 	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1469 	if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1470 			  &dp_stats, OVS_DP_ATTR_PAD))
1471 		goto nla_put_failure;
1472 
1473 	if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1474 			  sizeof(struct ovs_dp_megaflow_stats),
1475 			  &dp_megaflow_stats, OVS_DP_ATTR_PAD))
1476 		goto nla_put_failure;
1477 
1478 	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1479 		goto nla_put_failure;
1480 
1481 	genlmsg_end(skb, ovs_header);
1482 	return 0;
1483 
1484 nla_put_failure:
1485 	genlmsg_cancel(skb, ovs_header);
1486 error:
1487 	return -EMSGSIZE;
1488 }
1489 
1490 static struct sk_buff *ovs_dp_cmd_alloc_info(void)
1491 {
1492 	return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
1493 }
1494 
1495 /* Called with rcu_read_lock or ovs_mutex. */
1496 static struct datapath *lookup_datapath(struct net *net,
1497 					const struct ovs_header *ovs_header,
1498 					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1499 {
1500 	struct datapath *dp;
1501 
1502 	if (!a[OVS_DP_ATTR_NAME])
1503 		dp = get_dp(net, ovs_header->dp_ifindex);
1504 	else {
1505 		struct vport *vport;
1506 
1507 		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1508 		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1509 	}
1510 	return dp ? dp : ERR_PTR(-ENODEV);
1511 }
1512 
1513 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1514 {
1515 	struct datapath *dp;
1516 
1517 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1518 	if (IS_ERR(dp))
1519 		return;
1520 
1521 	WARN(dp->user_features, "Dropping previously announced user features\n");
1522 	dp->user_features = 0;
1523 }
1524 
1525 static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1526 {
1527 	if (a[OVS_DP_ATTR_USER_FEATURES])
1528 		dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1529 }
1530 
1531 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1532 {
1533 	struct nlattr **a = info->attrs;
1534 	struct vport_parms parms;
1535 	struct sk_buff *reply;
1536 	struct datapath *dp;
1537 	struct vport *vport;
1538 	struct ovs_net *ovs_net;
1539 	int err, i;
1540 
1541 	err = -EINVAL;
1542 	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1543 		goto err;
1544 
1545 	reply = ovs_dp_cmd_alloc_info();
1546 	if (!reply)
1547 		return -ENOMEM;
1548 
1549 	err = -ENOMEM;
1550 	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1551 	if (dp == NULL)
1552 		goto err_free_reply;
1553 
1554 	ovs_dp_set_net(dp, sock_net(skb->sk));
1555 
1556 	/* Allocate table. */
1557 	err = ovs_flow_tbl_init(&dp->table);
1558 	if (err)
1559 		goto err_free_dp;
1560 
1561 	dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1562 	if (!dp->stats_percpu) {
1563 		err = -ENOMEM;
1564 		goto err_destroy_table;
1565 	}
1566 
1567 	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1568 			    GFP_KERNEL);
1569 	if (!dp->ports) {
1570 		err = -ENOMEM;
1571 		goto err_destroy_percpu;
1572 	}
1573 
1574 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1575 		INIT_HLIST_HEAD(&dp->ports[i]);
1576 
1577 	err = ovs_meters_init(dp);
1578 	if (err)
1579 		goto err_destroy_ports_array;
1580 
1581 	/* Set up our datapath device. */
1582 	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1583 	parms.type = OVS_VPORT_TYPE_INTERNAL;
1584 	parms.options = NULL;
1585 	parms.dp = dp;
1586 	parms.port_no = OVSP_LOCAL;
1587 	parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1588 
1589 	ovs_dp_change(dp, a);
1590 
1591 	/* So far only local changes have been made, now need the lock. */
1592 	ovs_lock();
1593 
1594 	vport = new_vport(&parms);
1595 	if (IS_ERR(vport)) {
1596 		err = PTR_ERR(vport);
1597 		if (err == -EBUSY)
1598 			err = -EEXIST;
1599 
1600 		if (err == -EEXIST) {
1601 			/* An outdated user space instance that does not understand
1602 			 * the concept of user_features has attempted to create a new
1603 			 * datapath and is likely to reuse it. Drop all user features.
1604 			 */
1605 			if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1606 				ovs_dp_reset_user_features(skb, info);
1607 		}
1608 
1609 		goto err_destroy_meters;
1610 	}
1611 
1612 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1613 				   info->snd_seq, 0, OVS_DP_CMD_NEW);
1614 	BUG_ON(err < 0);
1615 
1616 	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1617 	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1618 
1619 	ovs_unlock();
1620 
1621 	ovs_notify(&dp_datapath_genl_family, reply, info);
1622 	return 0;
1623 
1624 err_destroy_meters:
1625 	ovs_unlock();
1626 	ovs_meters_exit(dp);
1627 err_destroy_ports_array:
1628 	kfree(dp->ports);
1629 err_destroy_percpu:
1630 	free_percpu(dp->stats_percpu);
1631 err_destroy_table:
1632 	ovs_flow_tbl_destroy(&dp->table);
1633 err_free_dp:
1634 	kfree(dp);
1635 err_free_reply:
1636 	kfree_skb(reply);
1637 err:
1638 	return err;
1639 }
1640 
1641 /* Called with ovs_mutex. */
1642 static void __dp_destroy(struct datapath *dp)
1643 {
1644 	int i;
1645 
1646 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1647 		struct vport *vport;
1648 		struct hlist_node *n;
1649 
1650 		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1651 			if (vport->port_no != OVSP_LOCAL)
1652 				ovs_dp_detach_port(vport);
1653 	}
1654 
1655 	list_del_rcu(&dp->list_node);
1656 
1657 	/* OVSP_LOCAL is datapath internal port. We need to make sure that
1658 	 * all ports in datapath are destroyed first before freeing datapath.
1659 	 */
1660 	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1661 
1662 	/* RCU destroy the flow table */
1663 	call_rcu(&dp->rcu, destroy_dp_rcu);
1664 }
1665 
1666 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1667 {
1668 	struct sk_buff *reply;
1669 	struct datapath *dp;
1670 	int err;
1671 
1672 	reply = ovs_dp_cmd_alloc_info();
1673 	if (!reply)
1674 		return -ENOMEM;
1675 
1676 	ovs_lock();
1677 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1678 	err = PTR_ERR(dp);
1679 	if (IS_ERR(dp))
1680 		goto err_unlock_free;
1681 
1682 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1683 				   info->snd_seq, 0, OVS_DP_CMD_DEL);
1684 	BUG_ON(err < 0);
1685 
1686 	__dp_destroy(dp);
1687 	ovs_unlock();
1688 
1689 	ovs_notify(&dp_datapath_genl_family, reply, info);
1690 
1691 	return 0;
1692 
1693 err_unlock_free:
1694 	ovs_unlock();
1695 	kfree_skb(reply);
1696 	return err;
1697 }
1698 
1699 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1700 {
1701 	struct sk_buff *reply;
1702 	struct datapath *dp;
1703 	int err;
1704 
1705 	reply = ovs_dp_cmd_alloc_info();
1706 	if (!reply)
1707 		return -ENOMEM;
1708 
1709 	ovs_lock();
1710 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1711 	err = PTR_ERR(dp);
1712 	if (IS_ERR(dp))
1713 		goto err_unlock_free;
1714 
1715 	ovs_dp_change(dp, info->attrs);
1716 
1717 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1718 				   info->snd_seq, 0, OVS_DP_CMD_NEW);
1719 	BUG_ON(err < 0);
1720 
1721 	ovs_unlock();
1722 	ovs_notify(&dp_datapath_genl_family, reply, info);
1723 
1724 	return 0;
1725 
1726 err_unlock_free:
1727 	ovs_unlock();
1728 	kfree_skb(reply);
1729 	return err;
1730 }
1731 
1732 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1733 {
1734 	struct sk_buff *reply;
1735 	struct datapath *dp;
1736 	int err;
1737 
1738 	reply = ovs_dp_cmd_alloc_info();
1739 	if (!reply)
1740 		return -ENOMEM;
1741 
1742 	ovs_lock();
1743 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1744 	if (IS_ERR(dp)) {
1745 		err = PTR_ERR(dp);
1746 		goto err_unlock_free;
1747 	}
1748 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1749 				   info->snd_seq, 0, OVS_DP_CMD_NEW);
1750 	BUG_ON(err < 0);
1751 	ovs_unlock();
1752 
1753 	return genlmsg_reply(reply, info);
1754 
1755 err_unlock_free:
1756 	ovs_unlock();
1757 	kfree_skb(reply);
1758 	return err;
1759 }
1760 
1761 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1762 {
1763 	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1764 	struct datapath *dp;
1765 	int skip = cb->args[0];
1766 	int i = 0;
1767 
1768 	ovs_lock();
1769 	list_for_each_entry(dp, &ovs_net->dps, list_node) {
1770 		if (i >= skip &&
1771 		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1772 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1773 					 OVS_DP_CMD_NEW) < 0)
1774 			break;
1775 		i++;
1776 	}
1777 	ovs_unlock();
1778 
1779 	cb->args[0] = i;
1780 
1781 	return skb->len;
1782 }
1783 
1784 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1785 	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1786 	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1787 	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1788 };
1789 
1790 static const struct genl_ops dp_datapath_genl_ops[] = {
1791 	{ .cmd = OVS_DP_CMD_NEW,
1792 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1793 	  .policy = datapath_policy,
1794 	  .doit = ovs_dp_cmd_new
1795 	},
1796 	{ .cmd = OVS_DP_CMD_DEL,
1797 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1798 	  .policy = datapath_policy,
1799 	  .doit = ovs_dp_cmd_del
1800 	},
1801 	{ .cmd = OVS_DP_CMD_GET,
1802 	  .flags = 0,		    /* OK for unprivileged users. */
1803 	  .policy = datapath_policy,
1804 	  .doit = ovs_dp_cmd_get,
1805 	  .dumpit = ovs_dp_cmd_dump
1806 	},
1807 	{ .cmd = OVS_DP_CMD_SET,
1808 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1809 	  .policy = datapath_policy,
1810 	  .doit = ovs_dp_cmd_set,
1811 	},
1812 };
1813 
1814 static struct genl_family dp_datapath_genl_family __ro_after_init = {
1815 	.hdrsize = sizeof(struct ovs_header),
1816 	.name = OVS_DATAPATH_FAMILY,
1817 	.version = OVS_DATAPATH_VERSION,
1818 	.maxattr = OVS_DP_ATTR_MAX,
1819 	.netnsok = true,
1820 	.parallel_ops = true,
1821 	.ops = dp_datapath_genl_ops,
1822 	.n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1823 	.mcgrps = &ovs_dp_datapath_multicast_group,
1824 	.n_mcgrps = 1,
1825 	.module = THIS_MODULE,
1826 };
1827 
1828 /* Called with ovs_mutex or RCU read lock. */
1829 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1830 				   struct net *net, u32 portid, u32 seq,
1831 				   u32 flags, u8 cmd)
1832 {
1833 	struct ovs_header *ovs_header;
1834 	struct ovs_vport_stats vport_stats;
1835 	int err;
1836 
1837 	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1838 				 flags, cmd);
1839 	if (!ovs_header)
1840 		return -EMSGSIZE;
1841 
1842 	ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1843 
1844 	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1845 	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1846 	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1847 			   ovs_vport_name(vport)) ||
1848 	    nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
1849 		goto nla_put_failure;
1850 
1851 	if (!net_eq(net, dev_net(vport->dev))) {
1852 		int id = peernet2id_alloc(net, dev_net(vport->dev));
1853 
1854 		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
1855 			goto nla_put_failure;
1856 	}
1857 
1858 	ovs_vport_get_stats(vport, &vport_stats);
1859 	if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
1860 			  sizeof(struct ovs_vport_stats), &vport_stats,
1861 			  OVS_VPORT_ATTR_PAD))
1862 		goto nla_put_failure;
1863 
1864 	if (ovs_vport_get_upcall_portids(vport, skb))
1865 		goto nla_put_failure;
1866 
1867 	err = ovs_vport_get_options(vport, skb);
1868 	if (err == -EMSGSIZE)
1869 		goto error;
1870 
1871 	genlmsg_end(skb, ovs_header);
1872 	return 0;
1873 
1874 nla_put_failure:
1875 	err = -EMSGSIZE;
1876 error:
1877 	genlmsg_cancel(skb, ovs_header);
1878 	return err;
1879 }
1880 
1881 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1882 {
1883 	return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1884 }
1885 
1886 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1887 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
1888 					 u32 portid, u32 seq, u8 cmd)
1889 {
1890 	struct sk_buff *skb;
1891 	int retval;
1892 
1893 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1894 	if (!skb)
1895 		return ERR_PTR(-ENOMEM);
1896 
1897 	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
1898 	BUG_ON(retval < 0);
1899 
1900 	return skb;
1901 }
1902 
1903 /* Called with ovs_mutex or RCU read lock. */
1904 static struct vport *lookup_vport(struct net *net,
1905 				  const struct ovs_header *ovs_header,
1906 				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1907 {
1908 	struct datapath *dp;
1909 	struct vport *vport;
1910 
1911 	if (a[OVS_VPORT_ATTR_IFINDEX])
1912 		return ERR_PTR(-EOPNOTSUPP);
1913 	if (a[OVS_VPORT_ATTR_NAME]) {
1914 		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1915 		if (!vport)
1916 			return ERR_PTR(-ENODEV);
1917 		if (ovs_header->dp_ifindex &&
1918 		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1919 			return ERR_PTR(-ENODEV);
1920 		return vport;
1921 	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1922 		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1923 
1924 		if (port_no >= DP_MAX_PORTS)
1925 			return ERR_PTR(-EFBIG);
1926 
1927 		dp = get_dp(net, ovs_header->dp_ifindex);
1928 		if (!dp)
1929 			return ERR_PTR(-ENODEV);
1930 
1931 		vport = ovs_vport_ovsl_rcu(dp, port_no);
1932 		if (!vport)
1933 			return ERR_PTR(-ENODEV);
1934 		return vport;
1935 	} else {
1936 		return ERR_PTR(-EINVAL);
1937 	}
1938 }
1939 
1940 /* Called with ovs_mutex */
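/* Two passes: first compute the maximum forwarding headroom over all
 * attached devices, then push that maximum to every vport so that
 * packets entering the datapath reserve enough skb headroom up front.
 */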
1941 static void update_headroom(struct datapath *dp)
1942 {
1943 	unsigned int dev_headroom, max_headroom = 0;
1944 	struct net_device *dev;
1945 	struct vport *vport;
1946 	int i;
1947 
1948 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1949 		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1950 			dev = vport->dev;
1951 			dev_headroom = netdev_get_fwd_headroom(dev);
1952 			if (dev_headroom > max_headroom)
1953 				max_headroom = dev_headroom;
1954 		}
1955 	}
1956 
1957 	dp->max_headroom = max_headroom;
1958 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1959 		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
1960 			netdev_set_rx_headroom(vport->dev, max_headroom);
1961 }
1962 
1963 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1964 {
1965 	struct nlattr **a = info->attrs;
1966 	struct ovs_header *ovs_header = info->userhdr;
1967 	struct vport_parms parms;
1968 	struct sk_buff *reply;
1969 	struct vport *vport;
1970 	struct datapath *dp;
1971 	u32 port_no;
1972 	int err;
1973 
1974 	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1975 	    !a[OVS_VPORT_ATTR_UPCALL_PID])
1976 		return -EINVAL;
1977 	if (a[OVS_VPORT_ATTR_IFINDEX])
1978 		return -EOPNOTSUPP;
1979 
1980 	port_no = a[OVS_VPORT_ATTR_PORT_NO]
1981 		? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1982 	if (port_no >= DP_MAX_PORTS)
1983 		return -EFBIG;
1984 
1985 	reply = ovs_vport_cmd_alloc_info();
1986 	if (!reply)
1987 		return -ENOMEM;
1988 
1989 	ovs_lock();
1990 restart:
1991 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1992 	err = -ENODEV;
1993 	if (!dp)
1994 		goto exit_unlock_free;
1995 
1996 	if (port_no) {
1997 		vport = ovs_vport_ovsl(dp, port_no);
1998 		err = -EBUSY;
1999 		if (vport)
2000 			goto exit_unlock_free;
2001 	} else {
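		/* No port number was requested: claim the lowest free
		 * one, starting just above OVSP_LOCAL (port 0).
		 */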
2002 		for (port_no = 1; ; port_no++) {
2003 			if (port_no >= DP_MAX_PORTS) {
2004 				err = -EFBIG;
2005 				goto exit_unlock_free;
2006 			}
2007 			vport = ovs_vport_ovsl(dp, port_no);
2008 			if (!vport)
2009 				break;
2010 		}
2011 	}
2012 
2013 	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2014 	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2015 	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
2016 	parms.dp = dp;
2017 	parms.port_no = port_no;
2018 	parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
2019 
2020 	vport = new_vport(&parms);
2021 	err = PTR_ERR(vport);
2022 	if (IS_ERR(vport)) {
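		/* -EAGAIN means ovs_vport_add() dropped ovs_lock to load
		 * a vport module; the datapath may have changed underneath
		 * us, so restart from the datapath lookup.
		 */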
2023 		if (err == -EAGAIN)
2024 			goto restart;
2025 		goto exit_unlock_free;
2026 	}
2027 
2028 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2029 				      info->snd_portid, info->snd_seq, 0,
2030 				      OVS_VPORT_CMD_NEW);
2031 
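	/* The new device may raise the datapath-wide headroom
	 * requirement; otherwise it simply inherits the current maximum.
	 */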
2032 	if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom)
2033 		update_headroom(dp);
2034 	else
2035 		netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2036 
2037 	BUG_ON(err < 0);
2038 	ovs_unlock();
2039 
2040 	ovs_notify(&dp_vport_genl_family, reply, info);
2041 	return 0;
2042 
2043 exit_unlock_free:
2044 	ovs_unlock();
2045 	kfree_skb(reply);
2046 	return err;
2047 }
2048 
2049 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2050 {
2051 	struct nlattr **a = info->attrs;
2052 	struct sk_buff *reply;
2053 	struct vport *vport;
2054 	int err;
2055 
2056 	reply = ovs_vport_cmd_alloc_info();
2057 	if (!reply)
2058 		return -ENOMEM;
2059 
2060 	ovs_lock();
2061 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2062 	err = PTR_ERR(vport);
2063 	if (IS_ERR(vport))
2064 		goto exit_unlock_free;
2065 
2066 	if (a[OVS_VPORT_ATTR_TYPE] &&
2067 	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2068 		err = -EINVAL;
2069 		goto exit_unlock_free;
2070 	}
2071 
2072 	if (a[OVS_VPORT_ATTR_OPTIONS]) {
2073 		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2074 		if (err)
2075 			goto exit_unlock_free;
2076 	}
2077 
2079 	if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2080 		struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2081 
2082 		err = ovs_vport_set_upcall_portids(vport, ids);
2083 		if (err)
2084 			goto exit_unlock_free;
2085 	}
2086 
2087 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2088 				      info->snd_portid, info->snd_seq, 0,
2089 				      OVS_VPORT_CMD_SET);
2090 	BUG_ON(err < 0);
2091 
2092 	ovs_unlock();
2093 	ovs_notify(&dp_vport_genl_family, reply, info);
2094 	return 0;
2095 
2096 exit_unlock_free:
2097 	ovs_unlock();
2098 	kfree_skb(reply);
2099 	return err;
2100 }
2101 
2102 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2103 {
2104 	bool must_update_headroom = false;
2105 	struct nlattr **a = info->attrs;
2106 	struct sk_buff *reply;
2107 	struct datapath *dp;
2108 	struct vport *vport;
2109 	int err;
2110 
2111 	reply = ovs_vport_cmd_alloc_info();
2112 	if (!reply)
2113 		return -ENOMEM;
2114 
2115 	ovs_lock();
2116 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2117 	err = PTR_ERR(vport);
2118 	if (IS_ERR(vport))
2119 		goto exit_unlock_free;
2120 
2121 	if (vport->port_no == OVSP_LOCAL) {
2122 		err = -EINVAL;
2123 		goto exit_unlock_free;
2124 	}
2125 
2126 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2127 				      info->snd_portid, info->snd_seq, 0,
2128 				      OVS_VPORT_CMD_DEL);
2129 	BUG_ON(err < 0);
2130 
2131 	/* The vport deletion may trigger a datapath headroom update. */
2132 	dp = vport->dp;
2133 	if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
2134 		must_update_headroom = true;
2135 	netdev_reset_rx_headroom(vport->dev);
2136 	ovs_dp_detach_port(vport);
2137 
2138 	if (must_update_headroom)
2139 		update_headroom(dp);
2140 	ovs_unlock();
2141 
2142 	ovs_notify(&dp_vport_genl_family, reply, info);
2143 	return 0;
2144 
2145 exit_unlock_free:
2146 	ovs_unlock();
2147 	kfree_skb(reply);
2148 	return err;
2149 }
2150 
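/* Read-only operation: runs under rcu_read_lock() alone, without taking
 * ovs_lock.
 */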
2151 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2152 {
2153 	struct nlattr **a = info->attrs;
2154 	struct ovs_header *ovs_header = info->userhdr;
2155 	struct sk_buff *reply;
2156 	struct vport *vport;
2157 	int err;
2158 
2159 	reply = ovs_vport_cmd_alloc_info();
2160 	if (!reply)
2161 		return -ENOMEM;
2162 
2163 	rcu_read_lock();
2164 	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2165 	err = PTR_ERR(vport);
2166 	if (IS_ERR(vport))
2167 		goto exit_unlock_free;
2168 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2169 				      info->snd_portid, info->snd_seq, 0,
2170 				      OVS_VPORT_CMD_GET);
2171 	BUG_ON(err < 0);
2172 	rcu_read_unlock();
2173 
2174 	return genlmsg_reply(reply, info);
2175 
2176 exit_unlock_free:
2177 	rcu_read_unlock();
2178 	kfree_skb(reply);
2179 	return err;
2180 }
2181 
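/* Dump cursor: cb->args[0] holds the hash bucket and cb->args[1] the
 * entry within it, so a dump cut short by a full skb resumes where it
 * left off.
 */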
2182 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2183 {
2184 	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2185 	struct datapath *dp;
2186 	int bucket = cb->args[0], skip = cb->args[1];
2187 	int i, j = 0;
2188 
2189 	rcu_read_lock();
2190 	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2191 	if (!dp) {
2192 		rcu_read_unlock();
2193 		return -ENODEV;
2194 	}
2195 	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2196 		struct vport *vport;
2197 
2198 		j = 0;
2199 		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2200 			if (j >= skip &&
2201 			    ovs_vport_cmd_fill_info(vport, skb,
2202 						    sock_net(skb->sk),
2203 						    NETLINK_CB(cb->skb).portid,
2204 						    cb->nlh->nlmsg_seq,
2205 						    NLM_F_MULTI,
2206 						    OVS_VPORT_CMD_GET) < 0)
2207 				goto out;
2208 
2209 			j++;
2210 		}
2211 		skip = 0;
2212 	}
2213 out:
2214 	rcu_read_unlock();
2215 
2216 	cb->args[0] = i;
2217 	cb->args[1] = j;
2218 
2219 	return skb->len;
2220 }
2221 
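/* OVS_VPORT_ATTR_IFINDEX and OVS_VPORT_ATTR_NETNSID are listed for
 * validation, but both are currently kernel-to-user only: a
 * user-supplied IFINDEX is rejected with -EOPNOTSUPP and NETNSID is
 * ignored on input.
 */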
2222 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2223 	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2224 	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2225 	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2226 	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2227 	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2228 	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2229 	[OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
2230 	[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
2231 };
2232 
2233 static const struct genl_ops dp_vport_genl_ops[] = {
2234 	{ .cmd = OVS_VPORT_CMD_NEW,
2235 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2236 	  .policy = vport_policy,
2237 	  .doit = ovs_vport_cmd_new
2238 	},
2239 	{ .cmd = OVS_VPORT_CMD_DEL,
2240 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2241 	  .policy = vport_policy,
2242 	  .doit = ovs_vport_cmd_del
2243 	},
2244 	{ .cmd = OVS_VPORT_CMD_GET,
2245 	  .flags = 0,		    /* OK for unprivileged users. */
2246 	  .policy = vport_policy,
2247 	  .doit = ovs_vport_cmd_get,
2248 	  .dumpit = ovs_vport_cmd_dump
2249 	},
2250 	{ .cmd = OVS_VPORT_CMD_SET,
2251 	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2252 	  .policy = vport_policy,
2253 	  .doit = ovs_vport_cmd_set,
2254 	},
2255 };
2256 
2257 struct genl_family dp_vport_genl_family __ro_after_init = {
2258 	.hdrsize = sizeof(struct ovs_header),
2259 	.name = OVS_VPORT_FAMILY,
2260 	.version = OVS_VPORT_VERSION,
2261 	.maxattr = OVS_VPORT_ATTR_MAX,
2262 	.netnsok = true,
2263 	.parallel_ops = true,
2264 	.ops = dp_vport_genl_ops,
2265 	.n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2266 	.mcgrps = &ovs_dp_vport_multicast_group,
2267 	.n_mcgrps = 1,
2268 	.module = THIS_MODULE,
2269 };
2270 
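/* All OVS generic netlink families, registered and unregistered as a
 * unit by the helpers below.
 */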
2271 static struct genl_family * const dp_genl_families[] = {
2272 	&dp_datapath_genl_family,
2273 	&dp_vport_genl_family,
2274 	&dp_flow_genl_family,
2275 	&dp_packet_genl_family,
2276 	&dp_meter_genl_family,
2277 };
2278 
2279 static void dp_unregister_genl(int n_families)
2280 {
2281 	int i;
2282 
2283 	for (i = 0; i < n_families; i++)
2284 		genl_unregister_family(dp_genl_families[i]);
2285 }
2286 
2287 static int __init dp_register_genl(void)
2288 {
2289 	int err;
2290 	int i;
2291 
2292 	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2293 
2294 		err = genl_register_family(dp_genl_families[i]);
2295 		if (err)
2296 			goto error;
2297 	}
2298 
2299 	return 0;
2300 
2301 error:
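	/* Roll back: unregister only the i families that succeeded. */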
2302 	dp_unregister_genl(i);
2303 	return err;
2304 }
2305 
2306 static int __net_init ovs_init_net(struct net *net)
2307 {
2308 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2309 
2310 	INIT_LIST_HEAD(&ovs_net->dps);
2311 	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2312 	ovs_ct_init(net);
2313 	return 0;
2314 }
2315 
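/* Collect internal-device vports that belong to datapaths in @net but
 * whose backing net_device lives in the exiting namespace @dnet.
 * Netdev-backed vports need no such pass: the netdevice notifier
 * detaches them when their device unregisters.
 */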
2316 static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2317 					    struct list_head *head)
2318 {
2319 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2320 	struct datapath *dp;
2321 
2322 	list_for_each_entry(dp, &ovs_net->dps, list_node) {
2323 		int i;
2324 
2325 		for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2326 			struct vport *vport;
2327 
2328 			hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2329 				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2330 					continue;
2331 
2332 				if (dev_net(vport->dev) == dnet)
2333 					list_add(&vport->detach_list, head);
2334 			}
2335 		}
2336 	}
2337 }
2338 
2339 static void __net_exit ovs_exit_net(struct net *dnet)
2340 {
2341 	struct datapath *dp, *dp_next;
2342 	struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2343 	struct vport *vport, *vport_next;
2344 	struct net *net;
2345 	LIST_HEAD(head);
2346 
2347 	ovs_ct_exit(dnet);
2348 	ovs_lock();
2349 	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2350 		__dp_destroy(dp);
2351 
2352 	rtnl_lock();
2353 	for_each_net(net)
2354 		list_vports_from_net(net, dnet, &head);
2355 	rtnl_unlock();
2356 
2357 	/* Detach all vports from given namespace. */
2358 	list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2359 		list_del(&vport->detach_list);
2360 		ovs_dp_detach_port(vport);
2361 	}
2362 
2363 	ovs_unlock();
2364 
2365 	cancel_work_sync(&ovs_net->dp_notify_work);
2366 }
2367 
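/* Per-namespace state: .id and .size make the core allocate one
 * struct ovs_net per namespace, retrievable via
 * net_generic(net, ovs_net_id).
 */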
2368 static struct pernet_operations ovs_net_ops = {
2369 	.init = ovs_init_net,
2370 	.exit = ovs_exit_net,
2371 	.id   = &ovs_net_id,
2372 	.size = sizeof(struct ovs_net),
2373 };
2374 
2375 static int __init dp_init(void)
2376 {
2377 	int err;
2378 
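	/* ovs_skb_cb travels in skb->cb; fail the build if it ever
	 * outgrows that buffer.
	 */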
2379 	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2380 
2381 	pr_info("Open vSwitch switching datapath\n");
2382 
2383 	err = action_fifos_init();
2384 	if (err)
2385 		goto error;
2386 
2387 	err = ovs_internal_dev_rtnl_link_register();
2388 	if (err)
2389 		goto error_action_fifos_exit;
2390 
2391 	err = ovs_flow_init();
2392 	if (err)
2393 		goto error_unreg_rtnl_link;
2394 
2395 	err = ovs_vport_init();
2396 	if (err)
2397 		goto error_flow_exit;
2398 
2399 	err = register_pernet_device(&ovs_net_ops);
2400 	if (err)
2401 		goto error_vport_exit;
2402 
2403 	err = register_netdevice_notifier(&ovs_dp_device_notifier);
2404 	if (err)
2405 		goto error_netns_exit;
2406 
2407 	err = ovs_netdev_init();
2408 	if (err)
2409 		goto error_unreg_notifier;
2410 
2411 	err = dp_register_genl();
2412 	if (err < 0)
2413 		goto error_unreg_netdev;
2414 
2415 	return 0;
2416 
2417 error_unreg_netdev:
2418 	ovs_netdev_exit();
2419 error_unreg_notifier:
2420 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
2421 error_netns_exit:
2422 	unregister_pernet_device(&ovs_net_ops);
2423 error_vport_exit:
2424 	ovs_vport_exit();
2425 error_flow_exit:
2426 	ovs_flow_exit();
2427 error_unreg_rtnl_link:
2428 	ovs_internal_dev_rtnl_link_unregister();
2429 error_action_fifos_exit:
2430 	action_fifos_exit();
2431 error:
2432 	return err;
2433 }
2434 
2435 static void dp_cleanup(void)
2436 {
2437 	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2438 	ovs_netdev_exit();
2439 	unregister_netdevice_notifier(&ovs_dp_device_notifier);
2440 	unregister_pernet_device(&ovs_net_ops);
2441 	rcu_barrier();
2442 	ovs_vport_exit();
2443 	ovs_flow_exit();
2444 	ovs_internal_dev_rtnl_link_unregister();
2445 	action_fifos_exit();
2446 }
2447 
2448 module_init(dp_init);
2449 module_exit(dp_cleanup);
2450 
2451 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2452 MODULE_LICENSE("GPL");
2453 MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
2454 MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
2455 MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
2456 MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
2457 MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
2458