/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_IP_TUNNELS_H
#define __NET_IP_TUNNELS_H 1

#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <linux/bitops.h>

#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
#include <net/dst_cache.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO	(30*HZ)

/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
	(sizeof_field(struct ip_tunnel_key, u) -		\
	 sizeof_field(struct ip_tunnel_key, u.ipv4))

#define __ipt_flag_op(op, ...)					\
	op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM)

#define IP_TUNNEL_DECLARE_FLAGS(...)				\
	__ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__)

#define ip_tunnel_flags_zero(...)	__ipt_flag_op(bitmap_zero, __VA_ARGS__)
#define ip_tunnel_flags_copy(...)	__ipt_flag_op(bitmap_copy, __VA_ARGS__)
#define ip_tunnel_flags_and(...)	__ipt_flag_op(bitmap_and, __VA_ARGS__)
#define ip_tunnel_flags_or(...)		__ipt_flag_op(bitmap_or, __VA_ARGS__)

#define ip_tunnel_flags_empty(...)				\
	__ipt_flag_op(bitmap_empty, __VA_ARGS__)
#define ip_tunnel_flags_intersect(...)				\
	__ipt_flag_op(bitmap_intersects, __VA_ARGS__)
#define ip_tunnel_flags_subset(...)				\
	__ipt_flag_op(bitmap_subset, __VA_ARGS__)

struct ip_tunnel_key {
	__be64			tun_id;
	union {
		struct {
			__be32	src;
			__be32	dst;
		} ipv4;
		struct {
			struct in6_addr src;
			struct in6_addr dst;
		} ipv6;
	} u;
	IP_TUNNEL_DECLARE_FLAGS(tun_flags);
	__be32			label;		/* Flow Label for IPv6 */
	u32			nhid;
	u8			tos;		/* TOS for IPv4, TC for IPv6 */
	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
	__be16			tp_src;
	__be16			tp_dst;
	__u8			flow_flags;
};

struct ip_tunnel_encap {
	u16			type;
	u16			flags;
	__be16			sport;
	__be16			dport;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */
#define IP_TUNNEL_INFO_BRIDGE	0x04	/* represents a bridged tunnel id */
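
/* Illustrative sketch, not part of this header: tunnel flags are plain
 * bitmaps, so callers combine the wrappers above with the generic bitops.
 * The variable names below are assumptions; the IP_TUNNEL_*_BIT values
 * come from <linux/if_tunnel.h>.
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *
 *	__set_bit(IP_TUNNEL_KEY_BIT, flags);
 *	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
 *
 *	if (!ip_tunnel_flags_empty(flags))
 *		ip_tunnel_flags_copy(key->tun_flags, flags);
 */
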
/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX					\
	GENMASK((sizeof_field(struct ip_tunnel_info,		\
			      options_len) * BITS_PER_BYTE) - 1, 0)

#define ip_tunnel_info_opts(info)				\
	_Generic(info,						\
		 const struct ip_tunnel_info * : ((const void *)(info)->options),\
		 struct ip_tunnel_info * : ((void *)(info)->options)\
	)

struct ip_tunnel_info {
	struct ip_tunnel_key	key;
	struct ip_tunnel_encap	encap;
#ifdef CONFIG_DST_CACHE
	struct dst_cache	dst_cache;
#endif
	u8			options_len;
	u8			mode;
	u8			options[] __aligned_largest __counted_by(options_len);
};

/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
	struct in6_addr		prefix;
	__be32			relay_prefix;
	u16			prefixlen;
	u16			relay_prefixlen;
};
#endif

struct ip_tunnel_prl_entry {
	struct ip_tunnel_prl_entry __rcu *next;
	__be32				  addr;
	u16				  flags;
	struct rcu_head			  rcu_head;
};

struct metadata_dst;

/* Kernel-side variant of ip_tunnel_parm */
struct ip_tunnel_parm_kern {
	char			name[IFNAMSIZ];
	IP_TUNNEL_DECLARE_FLAGS(i_flags);
	IP_TUNNEL_DECLARE_FLAGS(o_flags);
	__be32			i_key;
	__be32			o_key;
	int			link;
	struct iphdr		iph;
};

struct ip_tunnel {
	struct ip_tunnel __rcu	*next;
	struct hlist_node	hash_node;

	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct net		*net;	/* netns for packet i/o */

	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived
					 */
	int		err_count;	/* Number of arrived ICMP errors */

	/* These three fields are used only by GRE */
	u32		i_seqno;	/* The last seen seqno */
	atomic_t	o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */

	/* These four fields are used only by ERSPAN */
	u32		index;		/* ERSPAN type II index */
	u8		erspan_ver;	/* ERSPAN version */
	u8		dir;		/* ERSPAN direction */
	u16		hwid;		/* ERSPAN hardware ID */

	struct dst_cache dst_cache;

	struct ip_tunnel_parm_kern parms;

	int		mlink;
	int		encap_hlen;	/* Encap header length (FOU,GUE) */
	int		hlen;		/* tun_hlen + encap_hlen */
	struct ip_tunnel_encap encap;

	/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
	unsigned int		prl_count;	/* # of entries in PRL */
	unsigned int		ip_tnl_net_id;
	struct gro_cells	gro_cells;
	__u32			fwmark;
	bool			collect_md;
	bool			ignore_df;
};

struct tnl_ptk_info {
	IP_TUNNEL_DECLARE_FLAGS(flags);
	__be16 proto;
	__be32 key;
	__be32 seq;
	int hdr_len;
};

#define PACKET_RCVD	0
#define PACKET_REJECT	1
#define PACKET_NEXT	2

#define IP_TNL_HASH_BITS   7
#define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)

struct ip_tunnel_net {
	struct net_device *fb_tunnel_dev;
	struct rtnl_link_ops *rtnl_link_ops;
	struct hlist_head tunnels[IP_TNL_HASH_SIZE];
	struct ip_tunnel __rcu *collect_md_tun;
	int type;
};

static inline void ip_tunnel_set_options_present(unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

	ip_tunnel_flags_or(flags, flags, present);
}

static inline void ip_tunnel_clear_options_present(unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

	__ipt_flag_op(bitmap_andnot, flags, flags, present);
}

static inline bool ip_tunnel_is_options_present(const unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

	return ip_tunnel_flags_intersect(flags, present);
}
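
/* Illustrative sketch, not part of this header: a metadata-based driver
 * that copied vendor options into an ip_tunnel_info would mark the
 * matching option bit, and can test or strip all of them at once with the
 * three helpers above ("info" is an assumed local):
 *
 *	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags);
 *	...
 *	if (ip_tunnel_is_options_present(info->key.tun_flags))
 *		ip_tunnel_clear_options_present(info->key.tun_flags);
 */
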
static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(supp) = { };

	bitmap_set(supp, 0, BITS_PER_TYPE(__be16));
	__set_bit(IP_TUNNEL_VTI_BIT, supp);

	return ip_tunnel_flags_subset(flags, supp);
}

static inline void ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags)
{
	ip_tunnel_flags_zero(dst);

	bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16));
	__assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI);
}

static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags)
{
	__be16 ret;

	ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16)));
	if (test_bit(IP_TUNNEL_VTI_BIT, flags))
		ret |= VTI_ISVTI;

	return ret;
}
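
/* Illustrative sketch, not part of this header: legacy UAPI still speaks
 * 16-bit TUNNEL_* flags, so a round trip through the helpers above looks
 * roughly like this ("legacy" is an assumed local):
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *	__be16 legacy = 0;
 *
 *	ip_tunnel_flags_from_be16(flags, TUNNEL_KEY | TUNNEL_CSUM);
 *	if (ip_tunnel_flags_is_be16_compat(flags))
 *		legacy = ip_tunnel_flags_to_be16(flags);
 */
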
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl, __be32 label,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id,
				      const unsigned long *tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->label = label;
	ip_tunnel_flags_copy(key->tun_flags, tun_flags);

	/* For tunnel types on top of IPsec, the tp_src and tp_dst of
	 * the upper tunnel are used.
	 * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;

	/* Clear struct padding. */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}

static inline bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
			   const struct ip_tunnel_info *info)
{
	if (skb->mark)
		return false;

	return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags);
}

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
					       *tun_info)
{
	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

static inline __be64 key32_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)key;
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}

/* Returns the least-significant 32 bits of a __be64. */
static inline __be32 tunnel_id_to_key32(__be64 tun_id)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)tun_id;
#else
	return (__force __be32)((__force u64)tun_id >> 32);
#endif
}
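
/* Illustrative sketch, not part of this header: a VXLAN-style driver
 * carries a 24-bit VNI in a __be32 and widens it to the 64-bit tun_id
 * when filling a TX key ("vni", "flags" and the addresses are assumed
 * locals):
 *
 *	struct ip_tunnel_key key;
 *
 *	ip_tunnel_key_init(&key, saddr, daddr, tos, ttl, 0, 0, 0,
 *			   key32_to_tunnel_id(vni), flags);
 *
 * and recovers it on RX with tunnel_id_to_key32(key.tun_id).
 */
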
#ifdef CONFIG_INET

static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
				       int proto,
				       __be32 daddr, __be32 saddr,
				       __be32 key, __u8 tos,
				       struct net *net, int oif,
				       __u32 mark, __u32 tun_inner_hash,
				       __u8 flow_flags)
{
	memset(fl4, 0, sizeof(*fl4));

	if (oif) {
		fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index(net, oif);
		/* Legacy VRF/l3mdev use case */
		fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
	}

	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
	fl4->flowi4_mark = mark;
	fl4->flowi4_multipath_hash = tun_inner_hash;
	fl4->flowi4_flags = flow_flags;
}

int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname);

void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
			   struct rtnl_link_ops *ops,
			   struct list_head *dev_to_kill);

void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		       const u8 proto, int tunnel_hlen);
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
		  int cmd);
bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
			      const void __user *data);
bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			     void __user *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, const unsigned long *flags,
				   __be32 remote, __be32 local,
				   __be32 key);

void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm_kern *p, __u32 fwmark);
int ip_tunnel_newlink(struct net *net, struct net_device *dev,
		      struct nlattr *tb[], struct ip_tunnel_parm_kern *p,
		      __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);

bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
				   struct ip_tunnel_encap *encap);

void ip_tunnel_netlink_parms(struct nlattr *data[],
			     struct ip_tunnel_parm_kern *parms);

extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
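
/* Illustrative sketch, not part of this header: a device-based tunnel
 * resolves its TX route roughly the way ip_tunnel_xmit() does internally
 * ("t" is an assumed struct ip_tunnel pointer):
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, t->parms.iph.daddr,
 *			    t->parms.iph.saddr, t->parms.o_key,
 *			    t->parms.iph.tos, t->net, t->parms.link,
 *			    t->fwmark, 0, 0);
 *	rt = ip_route_output_key(t->net, &fl4);
 *	if (IS_ERR(rt))
 *		goto tx_error;
 */
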
struct ip_tunnel_encap_ops {
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4);
	int (*err_handler)(struct sk_buff *skb, u32 info);
};

#define MAX_IPTUN_ENCAP_OPS 8

extern const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS];

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap);

static inline enum skb_drop_reason
pskb_inet_may_pull_reason(struct sk_buff *skb)
{
	int nhlen;

	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		nhlen = sizeof(struct ipv6hdr);
		break;
#endif
	case htons(ETH_P_IP):
		nhlen = sizeof(struct iphdr);
		break;
	default:
		nhlen = 0;
	}

	return pskb_network_may_pull_reason(skb, nhlen);
}

static inline bool pskb_inet_may_pull(struct sk_buff *skb)
{
	return pskb_inet_may_pull_reason(skb) == SKB_NOT_DROPPED_YET;
}

/* Variant of pskb_inet_may_pull() that also handles VLAN-tagged frames
 * and sets the network header past the MAC header.
 */
static inline enum skb_drop_reason
skb_vlan_inet_prepare(struct sk_buff *skb, bool inner_proto_inherit)
{
	int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
	__be16 type = skb->protocol;
	enum skb_drop_reason reason;

	/* Essentially this is skb_protocol(skb, true), but it also
	 * yields the MAC header length.
	 */
	if (eth_type_vlan(type))
		type = __vlan_get_protocol(skb, type, &maclen);

	switch (type) {
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		nhlen = sizeof(struct ipv6hdr);
		break;
#endif
	case htons(ETH_P_IP):
		nhlen = sizeof(struct iphdr);
		break;
	}
	/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
	 * a base network header in skb->head.
	 */
	reason = pskb_may_pull_reason(skb, maclen + nhlen);
	if (reason)
		return reason;

	skb_set_network_header(skb, maclen);

	return SKB_NOT_DROPPED_YET;
}

static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
	const struct ip_tunnel_encap_ops *ops;
	int hlen = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->encap_hlen))
		hlen = ops->encap_hlen(e);
	rcu_read_unlock();

	return hlen;
}

static inline int ip_tunnel_encap(struct sk_buff *skb,
				  struct ip_tunnel_encap *e,
				  u8 *protocol, struct flowi4 *fl4)
{
	const struct ip_tunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->build_header))
		ret = ops->build_header(skb, e, protocol, fl4);
	rcu_read_unlock();

	return ret;
}

/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (payload_protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

static inline __be32 ip_tunnel_get_flowlabel(const struct iphdr *iph,
					     const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IPV6))
		return ip6_flowlabel((const struct ipv6hdr *)iph);
	else
		return 0;
}

static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IP))
		return iph->ttl;
	else if (payload_protocol == htons(ETH_P_IPV6))
		return ((const struct ipv6hdr *)iph)->hop_limit;
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
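
/* Illustrative sketch, not part of this header: an IPIP-style TX tail
 * combines the ECN helper above with iptunnel_xmit() declared below
 * ("key", "tunnel", "rt", "fl4", "df" and "inner_iph" are assumed
 * locals):
 *
 *	u8 tos = ip_tunnel_ecn_encap(key->tos, inner_iph, skb);
 *
 *	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
 *		goto tx_error;
 *	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPIP,
 *		      tos, key->ttl, df, !net_eq(tunnel->net, dev_net(dev)));
 */
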
int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			   __be16 inner_proto, bool raw_proto, bool xnet);

static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
				       __be16 inner_proto, bool xnet)
{
	return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}

void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, u8 proto,
		   u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags);
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
			  int headroom, bool reply);

int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);

static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}

static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
	if (pkt_len > 0) {
		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		u64_stats_add(&tstats->tx_bytes, pkt_len);
		u64_stats_inc(&tstats->tx_packets);
		u64_stats_update_end(&tstats->syncp);
		put_cpu_ptr(tstats);
		return;
	}

	if (pkt_len < 0) {
		DEV_STATS_INC(dev, tx_errors);
		DEV_STATS_INC(dev, tx_aborted_errors);
	} else {
		DEV_STATS_INC(dev, tx_dropped);
	}
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, ip_tunnel_info_opts(info), info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   const unsigned long *flags)
{
	info->options_len = len;
	if (len > 0) {
		memcpy(ip_tunnel_info_opts(info), from, len);
		ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags,
				   flags);
	}
}

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return (struct ip_tunnel_info *)lwtstate->data;
}

DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);

/* Returns > 0 if metadata should be collected */
static inline int ip_tunnel_collect_metadata(void)
{
	return static_branch_unlikely(&ip_tunnel_metadata_cnt);
}

void __init ip_tunnel_core_init(void);

void ip_tunnel_need_metadata(void);
void ip_tunnel_unneed_metadata(void);

#else /* CONFIG_INET */

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return NULL;
}

static inline void ip_tunnel_need_metadata(void)
{
}

static inline void ip_tunnel_unneed_metadata(void)
{
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   const unsigned long *flags)
{
	info->options_len = 0;
}

#endif /* CONFIG_INET */

#endif /* __NET_IP_TUNNELS_H */