xref: /linux/net/openvswitch/actions.c (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>

#if IS_ENABLED(CONFIG_PSAMPLE)
#include <net/psample.h>
#endif

#include <net/sctp/checksum.h>

#include "datapath.h"
#include "drop.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"

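/* Per-CPU scratch state used while executing actions: the cloned flow
 * keys, the deferred action FIFO, pending fragmentation state and the
 * current action recursion level.  The 'bh_lock' is only initialized
 * here; presumably it serializes access from callers elsewhere.
 */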
DEFINE_PER_CPU(struct ovs_pcpu_storage, ovs_pcpu_storage) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key space.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(&ovs_pcpu_storage);
	struct action_flow_keys *keys = &ovs_pcpu->flow_keys;
	int level = ovs_pcpu->exec_level;
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

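/* The deferred action FIFO is per-CPU: 'head' is the producer index,
 * 'tail' the consumer index, and both are reset once the packet and
 * everything deferred for it have been processed.
 */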
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Queue actions for deferred execution.  Return the queued entry, or
 * NULL if the FIFO is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{
	struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage.action_fifos);
	struct deferred_action *da;

	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

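/* The "invalid" flag is a bit stashed in the flow key's 'mac_proto'
 * field; actions that need an accurate key (e.g. recirculation) call
 * ovs_flow_key_update() to re-extract it from the skb first.
 */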
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

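/* Push an MPLS label stack entry.  A zero 'mac_len' means the packet has
 * no Ethernet header (an L3 tunnel), so the key's MAC protocol switches
 * to MAC_PROTO_NONE.
 */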
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	int err;

	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	err = skb_vlan_push(skb, vlan->vlan_tpid,
			    ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
	skb_reset_mac_len(skb);
	return err;
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	int err;

	err = skb_eth_push(skb, ethh->addresses.eth_dst,
			   ethh->addresses.eth_src);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static noinline_for_stack int push_nsh(struct sk_buff *skb,
				       struct sw_flow_key *key,
				       const struct nlattr *a)
{
	u8 buffer[NSH_HDR_MAX_LEN];
	struct nshhdr *nh = (struct nshhdr *)buffer;
	int err;

	err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
	if (err)
		return err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
			     (__force __wsum)(ipv6_tclass << 12));

	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}

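/* Rewrite the 20-bit IPv6 flow label, which is spread across the three
 * flow_lbl bytes; the adjacent traffic-class nibble shares flow_lbl[0]
 * and is kept intact by the mask (see the comment on bits 21-24 below).
 */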
static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
	u32 ofl;

	ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
	fl = OVS_MASKED(ofl, fl, mask);

	/* Bits 21-24 are always unmasked, so this retains their values. */
	nh->flow_lbl[0] = (u8)(fl >> 16);
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[2] = (u8)fl;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

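/* Apply a masked set to the NSH base header and, for metadata type 1,
 * the fixed-size context headers.  Type 2 TLV metadata is not rewritten
 * here; only the flow key's context is cleared.
 */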
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				       length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
			    OVS_MASKED(nh->md1.context[i], key.context[i],
				       mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_pcpu_storage.frag_data);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_pcpu_storage.frag_data);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

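/* Fragment an over-MRU packet and send the pieces out of 'vport'.  A
 * stack-local dst built around ovs_dst_ops stands in for a real route so
 * that ip_do_fragment()/ipv6_fragment() can run; each resulting fragment
 * is handed back through ovs_vport_output().
 */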
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	enum ovs_drop_reason reason;
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		reason = OVS_DROP_FRAG_L2_TOO_LONG;
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		reason = OVS_DROP_FRAG_INVALID_PROTO;
		goto err;
	}

	return;
err:
	ovs_kfree_skb_reason(skb, reason);
}

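/* Transmit 'skb' on the given datapath port: apply any pending
 * truncation (cutlen), fragment packets that exceed the MRU but fit the
 * device MTU, and drop anything larger.
 */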
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport &&
		   netif_running(vport->dev) &&
		   netif_carrier_ok(vport->dev))) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
		}
	} else {
		kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
	}
}

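/* Build a dp_upcall_info from the nested OVS_USERSPACE_ATTR_* attributes
 * and send the packet to userspace through the Netlink upcall.
 */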
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	nla_for_each_nested(a, attr, rem) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			if (dp->user_features &
			    OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
				upcall.portid =
				  ovs_dp_get_upcall_portid(dp,
							   smp_processor_id());
			else
				upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{
	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
	struct nlattr *actions = nla_data(attr);

	if (nla_len(actions))
		return clone_execute(dp, skb, key, 0, nla_data(actions),
				     nla_len(actions), true, false);

	ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
	return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	u32 init_probability;
	bool clone_flow_key;
	int err;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);
	init_probability = OVS_CB(skb)->probability;

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || get_random_u32() > arg->probability)) {
		if (last)
			ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
		return 0;
	}

	OVS_CB(skb)->probability = arg->probability;

	clone_flow_key = !arg->exec;
	err = clone_execute(dp, skb, key, 0, actions, rem, last,
			    clone_flow_key);

	if (!last)
		OVS_CB(skb)->probability = init_probability;

	return err;
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

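/* Compute the packet hash for OVS_ACTION_ATTR_HASH and store it in the
 * flow key.  The result is folded with 'hash_basis' and forced non-zero
 * so a computed hash can be told apart from "no hash".
 */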
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
		/* OVS_HASH_ALG_L4 hashing type. */
		hash = skb_get_hash(skb);
	} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
		/* OVS_HASH_ALG_SYM_L4 hashing type.  NOTE: this doesn't
		 * extend past an encapsulated header.
		 */
		hash = __skb_get_hash_symmetric(skb);
	}

	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

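/* Run one of the two nested action lists depending on the packet length.
 * For packets reassembled from fragments, the original length (MRU plus
 * the L2 header) is compared instead of skb->len, and GSO packets are
 * judged by their per-segment MAC length.
 */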
static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

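/* Decrement the IPv4 TTL or IPv6 hop limit.  Returns -EHOSTUNREACH when
 * the packet would expire; the caller routes that to the nested
 * OVS_DEC_TTL_ATTR_ACTION list via dec_ttl_exception_handler().
 */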
1239 {
1240 	int err;
1241 
1242 	if (skb->protocol == htons(ETH_P_IPV6)) {
1243 		struct ipv6hdr *nh;
1244 
1245 		err = skb_ensure_writable(skb, skb_network_offset(skb) +
1246 					  sizeof(*nh));
1247 		if (unlikely(err))
1248 			return err;
1249 
1250 		nh = ipv6_hdr(skb);
1251 
1252 		if (nh->hop_limit <= 1)
1253 			return -EHOSTUNREACH;
1254 
1255 		key->ip.ttl = --nh->hop_limit;
1256 	} else if (skb->protocol == htons(ETH_P_IP)) {
1257 		struct iphdr *nh;
1258 		u8 old_ttl;
1259 
1260 		err = skb_ensure_writable(skb, skb_network_offset(skb) +
1261 					  sizeof(*nh));
1262 		if (unlikely(err))
1263 			return err;
1264 
1265 		nh = ip_hdr(skb);
1266 		if (nh->ttl <= 1)
1267 			return -EHOSTUNREACH;
1268 
1269 		old_ttl = nh->ttl--;
1270 		csum_replace2(&nh->check, htons(old_ttl << 8),
1271 			      htons(nh->ttl << 8));
1272 		key->ip.ttl = nh->ttl;
1273 	}
1274 	return 0;
1275 }
1276 
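/* Hand the packet to the psample sampling infrastructure, carrying the
 * group number, the optional user cookie, the truncated size and the
 * sampling probability accumulated in OVS_CB(skb)->probability.
 */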
#if IS_ENABLED(CONFIG_PSAMPLE)
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct psample_group psample_group = {};
	struct psample_metadata md = {};
	const struct nlattr *a;
	u32 rate;
	int rem;

	nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(a)) {
		case OVS_PSAMPLE_ATTR_GROUP:
			psample_group.group_num = nla_get_u32(a);
			break;

		case OVS_PSAMPLE_ATTR_COOKIE:
			md.user_cookie = nla_data(a);
			md.user_cookie_len = nla_len(a);
			break;
		}
	}

	psample_group.net = ovs_dp_get_net(dp);
	md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
	md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
	md.rate_as_probability = 1;

	rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;

	psample_sample_packet(&psample_group, skb, rate, &md);
}
#else
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{}
#endif

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (trace_ovs_do_execute_action_enabled())
			trace_ovs_do_execute_action(dp, skb, key, a, rem);

		/* Actions that rightfully have to consume the skb should do it
		 * and return directly.
		 */
		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'.  If the output action is the last
			 * action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}
		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH:
			err = push_nsh(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				ovs_kfree_skb_reason(skb, OVS_DROP_METER);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH)
				return dec_ttl_exception_handler(dp, skb,
								 key, a);
			break;

		case OVS_ACTION_ATTR_DROP: {
			enum ovs_drop_reason reason = nla_get_u32(a)
				? OVS_DROP_EXPLICIT_WITH_ERROR
				: OVS_DROP_EXPLICIT;

			ovs_kfree_skb_reason(skb, reason);
			return 0;
		}

		case OVS_ACTION_ATTR_PSAMPLE:
			execute_psample(dp, skb, a);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;
		}

		if (unlikely(err)) {
			ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
			return err;
		}
	}

	ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
	return 0;
}

/* Execute the actions on a clone of the packet.  The execution affects
 * neither the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone the key from the next recursion level
	 * of 'flow_keys'. If the clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(ovs_pcpu_storage.exec_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(ovs_pcpu_storage.exec_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space.  Defer actions. */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per-CPU action FIFO space.  Drop the 'skb' and
		 * log an error.
		 */
		ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else {  /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
					ovs_dp_name(dp), recirc_id);
			}
		}
	}
	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage.action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(ovs_pcpu_storage.exec_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(ovs_pcpu_storage.exec_level);
	return err;
}
1665