xref: /linux/net/openvswitch/actions.c (revision 9ee0034b8f49aaaa7e7c2da8db1038915db99c19)
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	__u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

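/* Illustrative sketch (not part of the original file): the per-CPU fifo
 * above is a simple bounded queue - put() advances head until the queue
 * is full, get() advances tail until it catches up, and the indices are
 * only reset by action_fifo_init() once the queue has fully drained.
 * A minimal usage pattern, assuming the caller stays on one CPU:
 */
#if 0
static void example_drain(struct action_fifo *fifo)
{
	struct deferred_action *da;

	while ((da = action_fifo_get(fifo)) != NULL)
		;	/* process da->skb against da->actions here */

	action_fifo_init(fifo);	/* reset head/tail for the next packet */
}
#endif
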
/* Return a pointer to the queued deferred action, or NULL if the fifo
 * is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}

static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	hdr->h_proto = ethertype;
}
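
/* A minimal sketch (an assumption mirroring update_ethertype() above, not
 * a separate kernel helper): for CHECKSUM_COMPLETE skbs a 16-bit field
 * rewrite can patch skb->csum with a two-halfword diff array instead of
 * recomputing the whole sum, relying on one's-complement arithmetic in
 * which subtraction is expressed with bitwise complements.
 */
#if 0
static void example_csum_replace_be16(struct sk_buff *skb,
				      __be16 old, __be16 new)
{
	__be16 diff[] = { ~old, new };

	skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
}
#endif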

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and
	 * MPLS GSO.
	 */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
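
/* For reference, a sketch of the 32-bit MPLS label stack entry written
 * by push_mpls()/set_mpls() (layout per RFC 3032; the EXAMPLE_* macros
 * are illustrative only - the kernel's own MPLS_LS_* definitions live
 * in <linux/mpls.h>):
 */
#if 0
#define EXAMPLE_MPLS_LABEL(lse)	((ntohl(lse) >> 12) & 0xfffff)	/* 20-bit label */
#define EXAMPLE_MPLS_TC(lse)	((ntohl(lse) >> 9) & 0x7)	/* traffic class */
#define EXAMPLE_MPLS_BOS(lse)	((ntohl(lse) >> 8) & 0x1)	/* bottom of stack */
#define EXAMPLE_MPLS_TTL(lse)	(ntohl(lse) & 0xff)		/* time to live */
#endif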

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	update_ethertype(skb, hdr, ethertype);
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
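
/* A minimal sketch of the masking rule used throughout this file
 * (assumption: matches the OVS_MASKED()/OVS_SET_MASKED() helpers defined
 * in the datapath's headers).  Because userspace pre-masks the key, no
 * (key & mask) term is needed:
 */
#if 0
static u16 example_masked(u16 old, u16 key, u16 mask)
{
	return key | (old & ~mask);	/* keep unmasked bits of 'old' */
}
#endif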

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}
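
/* Context for set_ipv6_fl() (illustrative, conceptual layout only - the
 * real struct ipv6hdr uses flow_lbl[3] bytes, not a bitfield): the first
 * 32 bits of an IPv6 header pack version, traffic class and flow label,
 * and the upper nibble of flow_lbl[0] belongs to the traffic class,
 * which is why the corresponding mask bits stay clear above.
 */
#if 0
struct example_ipv6_word0 {
	u32 version:4;		/* always 6 */
	u32 traffic_class:8;	/* lower nibble lives in flow_lbl[0] */
	u32 flow_label:20;	/* the part set_ipv6_fl() may rewrite */
};
#endif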

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so
	 * it makes sense to check whether the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so
	 * it makes sense to check whether the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so there is no need to
	 * check them individually.
	 */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}
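
/* A short note in sketch form (assumption: standard RFC 768 semantics,
 * not specific to this file): a UDP checksum of 0 on the wire means
 * "no checksum computed", so a checksum that folds to 0 must be sent as
 * the equivalent one's-complement value 0xffff (CSUM_MANGLED_0), which
 * is what set_udp() preserves above.
 */
#if 0
static __sum16 example_udp_fold(__wsum sum)
{
	__sum16 folded = csum_fold(sum);

	return folded ? folded : CSUM_MANGLED_0;
}
#endif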

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
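
/* A minimal sketch of the "carry errors through" XOR above: when the
 * packet arrived with a correct CRC32c (old == old_correct), the result
 * collapses to new_csum; when it arrived corrupted, the same error
 * pattern is preserved in the rewritten packet rather than silently
 * repaired.
 */
#if 0
static __le32 example_sctp_csum_carry(__le32 old, __le32 old_correct,
				      __le32 new)
{
	return old ^ old_correct ^ new;	/* == new iff old == old_correct */
}
#endif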

static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header.  */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	ovs_vport_send(vport, skb);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its
 * counterpart is ovs_vport_output(), which is called once for each
 * resulting fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru, __be16 ethertype)
{
	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}
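
/* Why the temporary dst above works (descriptive note, not from the
 * original sources): the stack's fragmentation paths derive their size
 * limits from the attached dst and the frag_max_size hint, so
 * ovs_fragment() attaches an on-stack dst whose ops report the egress
 * device MTU.  DST_NOCOUNT plus skb_dst_set_noref() keep the stack from
 * taking or dropping references on an object that lives on the stack
 * frame, and the original dst reference saved in orig_dst is released
 * afterwards with refdst_drop().
 */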

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ETH_HLEN)
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ETH_HLEN);
		}

		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = skb->inner_protocol;
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(net, vport, skb, mru, ethertype);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;
	u32 cutlen = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of the sample action is having a single
	 * user-space action, or a truncate action followed by a single
	 * user-space action.  Treat this usage as a special case.
	 * output_userspace() clones the skb to be sent to user space;
	 * the skb passed in here is still consumed by its caller.
	 */
	if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
		struct ovs_action_trunc *trunc = nla_data(a);

		if (skb->len > trunc->max_len)
			cutlen = skb->len - trunc->max_len;

		a = nla_next(a, &rem);
	}

	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions,
					actions_len, cutlen);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}
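
/* A minimal sketch (hypothetical helper, not part of the datapath) of
 * how OVS_SAMPLE_ATTR_PROBABILITY maps onto the prandom_u32() test in
 * sample() above: the u32 attribute spans the full 32-bit range, so a
 * sampling rate of p percent corresponds to roughly p/100 of U32_MAX.
 */
#if 0
#include <linux/math64.h>

static u32 example_sample_probability(u32 percent)
{
	return (u32)div_u64((u64)U32_MAX * percent, 100);
}
#endif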

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
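
/* Illustrative: a masked-set attribute carries the key immediately
 * followed by an equally-sized mask, which is what makes the "+ 1"
 * pointer arithmetic in get_mask() work.  E.g. for OVS_KEY_ATTR_IPV4
 * (sketch, assuming a validated attribute 'a'):
 *
 *   nla_data(a): [ struct ovs_key_ipv4 key ][ struct ovs_key_ipv4 mask ]
 */
#if 0
static void example_get_mask(const struct nlattr *a)
{
	const struct ovs_key_ipv4 *key = nla_data(a);
	const struct ovs_key_ipv4 *mask = get_mask(a, struct ovs_key_ipv4 *);
}
#endif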

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action
		 * in the action list, so the skb needs to be cloned.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, where doing a clone and
	 * then freeing the original skbuff would be wasteful.  So the
	 * following code is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			OVS_CB(skb)->cutlen = 0;
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet.  */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	static const int ovs_recursion_limit = 5;
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > ovs_recursion_limit)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}
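
/* An illustrative call flow (descriptive sketch, not from the original
 * sources) for a packet whose action list contains a recirc action:
 *
 *   ovs_execute_actions()           exec_actions_level -> 1
 *     do_execute_actions()          queues the skb via add_deferred_actions()
 *     process_deferred_actions()    drains the fifo (level 1 only)
 *       ovs_dp_process_packet()     flow re-lookup with the new recirc_id
 *         ovs_execute_actions()     exec_actions_level -> 2, no draining
 *
 * Draining only at level 1 bounds stack depth; the recursion limit of 5
 * above caps pathological action chains.
 */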

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}