// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>

#if IS_ENABLED(CONFIG_PSAMPLE)
#include <net/psample.h>
#endif

#include <net/sctp/checksum.h>

#include "datapath.h"
#include "drop.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"

struct ovs_pcpu_storage __percpu *ovs_pcpu_storage;

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key space.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(ovs_pcpu_storage);
	struct action_flow_keys *keys = &ovs_pcpu->flow_keys;
	int level = ovs_pcpu->exec_level;
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

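/* Reset the per-CPU FIFO of deferred actions to its empty state. */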
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return the queued entry, or NULL if the FIFO is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *actions,
						    const int actions_len)
{
	struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage->action_fifos);
	struct deferred_action *da;

	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

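/* Push an MPLS label stack entry onto 'skb'; pushing onto an L3 packet
 * (mac_len == 0) leaves the flow key marked as having no L2 header.
 */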
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	int err;

	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	err = skb_vlan_push(skb, vlan->vlan_tpid,
			    ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
	skb_reset_mac_len(skb);
	return err;
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	int err;

	err = skb_eth_push(skb, ethh->addresses.eth_dst,
			   ethh->addresses.eth_src);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static noinline_for_stack int push_nsh(struct sk_buff *skb,
				       struct sw_flow_key *key,
				       const struct nlattr *a)
{
	u8 buffer[NSH_HDR_MAX_LEN];
	struct nshhdr *nh = (struct nshhdr *)buffer;
	int err;

	err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
	if (err)
		return err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

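/* Update the L4 checksum for a changed IPv4 address. Non-first fragments
 * and UDP datagrams carrying no checksum are left untouched.
 */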
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
			     (__force __wsum)(ipv6_tclass << 12));

	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}

static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
	u32 ofl;

	ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
	fl = OVS_MASKED(ofl, fl, mask);

	/* Bits 21-24 are always unmasked, so this retains their values. */
	nh->flow_lbl[0] = (u8)(fl >> 16);
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[2] = (u8)fl;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

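/* Apply a masked set() to the NSH header of 'skb' and mirror the result
 * into 'flow_key'.
 */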
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
			    OVS_MASKED(nh->md1.context[i], key.context[i],
				       mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking
	 * them individually.
	 */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

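/* Output handler invoked once per fragment built by ovs_fragment():
 * restores the L2 header and skb metadata saved by prepare_frag() and
 * sends the fragment out the saved vport.
 */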
static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_pcpu_storage->frag_data);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_pcpu_storage->frag_data);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

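/* Fragment 'skb' to at most 'mru' bytes using the IPv4 or IPv6 stack,
 * emitting each fragment through ovs_vport_output(). Consumes 'skb'.
 */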
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	enum ovs_drop_reason reason;
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		reason = OVS_DROP_FRAG_L2_TOO_LONG;
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		reason = OVS_DROP_FRAG_INVALID_PROTO;
		goto err;
	}

	return;
err:
	ovs_kfree_skb_reason(skb, reason);
}

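/* Transmit 'skb' on 'out_port', trimming any truncation (cutlen) first
 * and fragmenting if the packet exceeds the MRU. Consumes 'skb'.
 */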
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport &&
		   netif_running(vport->dev) &&
		   netif_carrier_ok(vport->dev))) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
		}
	} else {
		kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
	}
}

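/* Build an OVS_PACKET_CMD_ACTION upcall from the attributes of an
 * OVS_ACTION_ATTR_USERSPACE action and deliver 'skb' to userspace.
 */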
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	nla_for_each_nested(a, attr, rem) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			if (dp->user_features &
			    OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
				upcall.portid =
				    ovs_dp_get_upcall_portid(dp,
							     smp_processor_id());
			else
				upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{
	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
	struct nlattr *actions = nla_data(attr);

	if (nla_len(actions))
		return clone_execute(dp, skb, key, 0, nla_data(actions),
				     nla_len(actions), true, false);

	ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
	return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	u32 init_probability;
	bool clone_flow_key;
	int err;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);
	init_probability = OVS_CB(skb)->probability;

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || get_random_u32() > arg->probability)) {
		if (last)
			ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
		return 0;
	}

	OVS_CB(skb)->probability = arg->probability;

	clone_flow_key = !arg->exec;
	err = clone_execute(dp, skb, key, 0, actions, rem, last,
			    clone_flow_key);

	if (!last)
		OVS_CB(skb)->probability = init_probability;

	return err;
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

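/* Compute the requested hash over 'skb' and store the result, which is
 * never zero, in key->ovs_flow_hash.
 */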
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
		/* OVS_HASH_ALG_L4 hashing type. */
		hash = skb_get_hash(skb);
	} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
		/* OVS_HASH_ALG_SYM_L4 hashing type. NOTE: this doesn't
		 * extend past an encapsulated header.
		 */
		hash = __skb_get_hash_symmetric(skb);
	}

	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

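/* Recirculate 'skb' through the datapath flow lookup with a new
 * recirc_id, re-extracting the flow key first if it is not valid.
 */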
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

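/* Decrement the IPv4 TTL or IPv6 hop limit of 'skb'. Returns
 * -EHOSTUNREACH when the packet would expire, so the caller can run the
 * OVS_ACTION_ATTR_DEC_TTL exception actions instead.
 */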
static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}

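/* Hand a copy of 'skb' to the psample multiplexer, translating the
 * sampling state kept in OVS_CB(skb) into psample metadata.
 */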
#if IS_ENABLED(CONFIG_PSAMPLE)
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct psample_group psample_group = {};
	struct psample_metadata md = {};
	const struct nlattr *a;
	u32 rate;
	int rem;

	nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(a)) {
		case OVS_PSAMPLE_ATTR_GROUP:
			psample_group.group_num = nla_get_u32(a);
			break;

		case OVS_PSAMPLE_ATTR_COOKIE:
			md.user_cookie = nla_data(a);
			md.user_cookie_len = nla_len(a);
			break;
		}
	}

	psample_group.net = ovs_dp_get_net(dp);
	md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
	md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
	md.rate_as_probability = 1;

	rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;

	psample_sample_packet(&psample_group, skb, rate, &md);
}
#else
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{}
#endif

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (trace_ovs_do_execute_action_enabled())
			trace_ovs_do_execute_action(dp, skb, key, a, rem);

		/* Actions that rightfully have to consume the skb should do it
		 * and return directly.
		 */
		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'; if the output action is the last
			 * action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output.
				 */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}
		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH:
			err = push_nsh(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				ovs_kfree_skb_reason(skb, OVS_DROP_METER);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH)
				return dec_ttl_exception_handler(dp, skb,
								 key, a);
			break;

		case OVS_ACTION_ATTR_DROP: {
			enum ovs_drop_reason reason = nla_get_u32(a)
				? OVS_DROP_EXPLICIT_WITH_ERROR
				: OVS_DROP_EXPLICIT;

			ovs_kfree_skb_reason(skb, reason);
			return 0;
		}

		case OVS_ACTION_ATTR_PSAMPLE:
			execute_psample(dp, skb, a);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;
		}

		if (unlikely(err)) {
			ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
			return err;
		}
	}

	ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
	return 0;
}

/* Execute the actions on the clone of the packet. The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions can not be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action.
		 */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(ovs_pcpu_storage->exec_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(ovs_pcpu_storage->exec_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per-CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
					ovs_dp_name(dp), recirc_id);
			}
		}
	}
	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage->action_fifos);

	/* Do not touch the FIFO in case there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(ovs_pcpu_storage->exec_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(ovs_pcpu_storage->exec_level);
	return err;
}