// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>

#if IS_ENABLED(CONFIG_PSAMPLE)
#include <net/psample.h>
#endif

#include <net/sctp/checksum.h>

#include "datapath.h"
#include "drop.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"

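/* Per-CPU scratch space shared by the action-execution path: it holds the
 * deferred-action FIFO, the per-recursion-level flow-key slots used by
 * clone_key(), the fragmentation state used by ovs_fragment(), and the
 * current action-execution recursion level.
 */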
struct ovs_pcpu_storage __percpu *ovs_pcpu_storage;

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key space.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(ovs_pcpu_storage);
	struct action_flow_keys *keys = &ovs_pcpu->flow_keys;
	int level = ovs_pcpu->exec_level;
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

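/* Dequeue the oldest deferred action, or return NULL if the FIFO is empty. */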
static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

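/* Reserve the next free FIFO slot, or return NULL if the FIFO is full. */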
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Queue actions for deferred execution. Return the FIFO entry on success,
 * or NULL if the FIFO is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *actions,
						    const int actions_len)
{
	struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage->action_fifos);
	struct deferred_action *da;

	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

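/* An invalidated key has the SW_FLOW_KEY_INVALID bit set in 'mac_proto'.
 * Actions that rewrite headers beyond what the key tracks set this bit so
 * that the key is re-extracted from the packet (ovs_flow_key_update())
 * before it is used again, e.g. for recirculation or conntrack.
 */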
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	int err;

	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	err = skb_vlan_push(skb, vlan->vlan_tpid,
			    ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
	skb_reset_mac_len(skb);
	return err;
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	int err;

	err = skb_eth_push(skb, ethh->addresses.eth_dst,
			   ethh->addresses.eth_src);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static noinline_for_stack int push_nsh(struct sk_buff *skb,
				       struct sw_flow_key *key,
				       const struct nlattr *a)
{
	u8 buffer[NSH_HDR_MAX_LEN];
	struct nshhdr *nh = (struct nshhdr *)buffer;
	int err;

	err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
	if (err)
		return err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

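/* Fix up the L4 (TCP/UDP) checksum after an IPv4 address rewrite. Non-first
 * fragments carry no L4 header and are left untouched.
 */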
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh,
			     u8 ipv6_tclass, u8 mask)
{
	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
			     (__force __wsum)(ipv6_tclass << 12));

	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}

static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl,
			u32 mask)
{
	u32 ofl;

	ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
	fl = OVS_MASKED(ofl, fl, mask);

	/* Bits 21-24 are always unmasked, so this retains their values. */
	nh->flow_lbl[0] = (u8)(fl >> 16);
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[2] = (u8)fl;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)htonl(ofl),
			     (__force __wsum)htonl(fl));
}

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl,
			 u8 mask)
{
	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting IP addresses is typically only a side effect of matching
	 * on them in the current userspace implementation, so it makes sense
	 * to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}
499
is_ipv6_mask_nonzero(const __be32 addr[4])500 static bool is_ipv6_mask_nonzero(const __be32 addr[4])
501 {
502 return !!(addr[0] | addr[1] | addr[2] | addr[3]);
503 }
504
set_ipv6(struct sk_buff * skb,struct sw_flow_key * flow_key,const struct ovs_key_ipv6 * key,const struct ovs_key_ipv6 * mask)505 static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
506 const struct ovs_key_ipv6 *key,
507 const struct ovs_key_ipv6 *mask)
508 {
509 struct ipv6hdr *nh;
510 int err;
511
512 err = skb_ensure_writable(skb, skb_network_offset(skb) +
513 sizeof(struct ipv6hdr));
514 if (unlikely(err))
515 return err;
516
517 nh = ipv6_hdr(skb);
518
519 /* Setting an IP addresses is typically only a side effect of
520 * matching on them in the current userspace implementation, so it
521 * makes sense to check if the value actually changed.
522 */
523 if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
524 __be32 *saddr = (__be32 *)&nh->saddr;
525 __be32 masked[4];
526
527 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
528
529 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
530 set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
531 true);
532 memcpy(&flow_key->ipv6.addr.src, masked,
533 sizeof(flow_key->ipv6.addr.src));
534 }
535 }
536 if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
537 unsigned int offset = 0;
538 int flags = IP6_FH_F_SKIP_RH;
539 bool recalc_csum = true;
540 __be32 *daddr = (__be32 *)&nh->daddr;
541 __be32 masked[4];
542
543 mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
544
545 if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
546 if (ipv6_ext_hdr(nh->nexthdr))
547 recalc_csum = (ipv6_find_hdr(skb, &offset,
548 NEXTHDR_ROUTING,
549 NULL, &flags)
550 != NEXTHDR_ROUTING);
551
552 set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
553 recalc_csum);
554 memcpy(&flow_key->ipv6.addr.dst, masked,
555 sizeof(flow_key->ipv6.addr.dst));
556 }
557 }
558 if (mask->ipv6_tclass) {
559 set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
560 flow_key->ip.tos = ipv6_get_dsfield(nh);
561 }
562 if (mask->ipv6_label) {
563 set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
564 ntohl(mask->ipv6_label));
565 flow_key->ipv6.label =
566 *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
567 }
568 if (mask->ipv6_hlimit) {
569 set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
570 flow_key->ip.ttl = nh->hop_limit;
571 }
572 return 0;
573 }

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

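/* dst_output-style callback used by the IP fragmentation code: restore the
 * OVS metadata and the saved L2 header from the per-CPU 'frag_data' and
 * hand each fragment to the vport for transmission.
 */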
static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_pcpu_storage->frag_data);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto,
				       data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_pcpu_storage->frag_data);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

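/* Fragment an over-MRU packet via the stack's IPv4/IPv6 fragmentation
 * helpers. A dummy, refcount-free dst backed by 'ovs_dst_ops' is installed
 * so the fragmentation code sees the egress device's MTU; each resulting
 * fragment is emitted through ovs_vport_output().
 */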
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	enum ovs_drop_reason reason;
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		reason = OVS_DROP_FRAG_L2_TOO_LONG;
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		reason = OVS_DROP_FRAG_INVALID_PROTO;
		goto err;
	}

	return;
err:
	ovs_kfree_skb_reason(skb, reason);
}

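/* Transmit 'skb' on 'out_port': apply any pending truncation ('cutlen'),
 * then send directly when the packet fits within the MRU, fragment when it
 * does not, and drop when fragmentation is not possible.
 */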
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport &&
		   netif_running(vport->dev) &&
		   netif_carrier_ok(vport->dev))) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
		}
	} else {
		kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
	}
}

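/* Send the packet to userspace with an OVS_PACKET_CMD_ACTION upcall,
 * applying the attributes nested in the OVS_ACTION_ATTR_USERSPACE action
 * (userdata, target portid, egress tunnel info, included actions).
 */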
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	nla_for_each_nested(a, attr, rem) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			if (OVS_CB(skb)->upcall_pid)
				upcall.portid = OVS_CB(skb)->upcall_pid;
			else if (dp->user_features &
				 OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
				upcall.portid =
					ovs_dp_get_upcall_portid(dp,
								 smp_processor_id());
			else
				upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{
	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
	struct nlattr *actions = nla_data(attr);

	if (nla_len(actions))
		return clone_execute(dp, skb, key, 0, nla_data(actions),
				     nla_len(actions), true, false);

	ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
	return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	u32 init_probability;
	bool clone_flow_key;
	int err;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);
	init_probability = OVS_CB(skb)->probability;

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || get_random_u32() > arg->probability)) {
		if (last)
			ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
		return 0;
	}

	OVS_CB(skb)->probability = arg->probability;

	clone_flow_key = !arg->exec;
	err = clone_execute(dp, skb, key, 0, actions, rem, last,
			    clone_flow_key);

	if (!last)
		OVS_CB(skb)->probability = init_probability;

	return err;
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

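/* Compute the flow hash requested by OVS_ACTION_ATTR_HASH and store it in
 * 'key->ovs_flow_hash'. A result of zero is reserved to mean "no hash", so
 * it is remapped to 0x1.
 */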
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
		/* OVS_HASH_ALG_L4 hashing type. */
		hash = skb_get_hash(skb);
	} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
		/* OVS_HASH_ALG_SYM_L4 hashing type. NOTE: this doesn't
		 * extend past an encapsulated header.
		 */
		hash = __skb_get_hash_symmetric(skb);
	}

	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
	case OVS_KEY_ATTR_NSH:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

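/* Decrement the IPv4 TTL or IPv6 hop limit in place, fixing the IPv4 header
 * checksum accordingly. Returns -EHOSTUNREACH, leaving the header unmodified,
 * when the TTL/hop limit is already 1 or less, so the caller can run the
 * nested OVS_DEC_TTL_ATTR_ACTION list instead.
 */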
static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}

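/* Emit the packet to the psample subsystem for OVS_ACTION_ATTR_PSAMPLE.
 * The sampling probability recorded on the skb by sample() is forwarded as
 * the psample rate (with rate_as_probability set); U32_MAX means "always".
 */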
#if IS_ENABLED(CONFIG_PSAMPLE)
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct psample_group psample_group = {};
	struct psample_metadata md = {};
	const struct nlattr *a;
	u32 rate;
	int rem;

	nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(a)) {
		case OVS_PSAMPLE_ATTR_GROUP:
			psample_group.group_num = nla_get_u32(a);
			break;

		case OVS_PSAMPLE_ATTR_COOKIE:
			md.user_cookie = nla_data(a);
			md.user_cookie_len = nla_len(a);
			break;
		}
	}

	psample_group.net = ovs_dp_get_net(dp);
	md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
	md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
	md.rate_as_probability = 1;

	rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;

	psample_sample_packet(&psample_group, skb, rate, &md);
}
#else
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{}
#endif

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (trace_ovs_do_execute_action_enabled())
			trace_ovs_do_execute_action(dp, skb, key, a, rem);

		/* Actions that rightfully have to consume the skb should do it
		 * and return directly.
		 */
		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'. In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}
		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH:
			err = push_nsh(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				ovs_kfree_skb_reason(skb, OVS_DROP_METER);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH)
				return dec_ttl_exception_handler(dp, skb,
								 key, a);
			break;

		case OVS_ACTION_ATTR_DROP: {
			enum ovs_drop_reason reason = nla_get_u32(a)
				? OVS_DROP_EXPLICIT_WITH_ERROR
				: OVS_DROP_EXPLICIT;

			ovs_kfree_skb_reason(skb, reason);
			return 0;
		}

		case OVS_ACTION_ATTR_PSAMPLE:
			execute_psample(dp, skb, a);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;
		}

		if (unlikely(err)) {
			ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
			return err;
		}
	}

	ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
	return 0;
}

/* Execute the actions on the clone of the packet. The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, then the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(ovs_pcpu_storage->exec_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(ovs_pcpu_storage->exec_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
					ovs_dp_name(dp), recirc_id);
			}
		}
	}
	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage->action_fifos);

	/* Do not touch the FIFO in case there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(ovs_pcpu_storage->exec_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(ovs_pcpu_storage->exec_level);
	return err;
}