/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_IP_TUNNELS_H
#define __NET_IP_TUNNELS_H 1

#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <linux/bitops.h>

#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
#include <net/dst_cache.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO	(30*HZ)

/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
	(sizeof_field(struct ip_tunnel_key, u) -		\
	 sizeof_field(struct ip_tunnel_key, u.ipv4))

#define __ipt_flag_op(op, ...)					\
	op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM)

#define IP_TUNNEL_DECLARE_FLAGS(...)				\
	__ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__)

#define ip_tunnel_flags_zero(...)	__ipt_flag_op(bitmap_zero, __VA_ARGS__)
#define ip_tunnel_flags_copy(...)	__ipt_flag_op(bitmap_copy, __VA_ARGS__)
#define ip_tunnel_flags_and(...)	__ipt_flag_op(bitmap_and, __VA_ARGS__)
#define ip_tunnel_flags_or(...)		__ipt_flag_op(bitmap_or, __VA_ARGS__)

#define ip_tunnel_flags_empty(...)				\
	__ipt_flag_op(bitmap_empty, __VA_ARGS__)
#define ip_tunnel_flags_intersect(...)				\
	__ipt_flag_op(bitmap_intersects, __VA_ARGS__)
#define ip_tunnel_flags_subset(...)				\
	__ipt_flag_op(bitmap_subset, __VA_ARGS__)

struct ip_tunnel_key {
	__be64			tun_id;
	union {
		struct {
			__be32	src;
			__be32	dst;
		} ipv4;
		struct {
			struct in6_addr src;
			struct in6_addr dst;
		} ipv6;
	} u;
	IP_TUNNEL_DECLARE_FLAGS(tun_flags);
	__be32			label;	/* Flow Label for IPv6 */
	u32			nhid;
	u8			tos;	/* TOS for IPv4, TC for IPv6 */
	u8			ttl;	/* TTL for IPv4, HL for IPv6 */
	__be16			tp_src;
	__be16			tp_dst;
	__u8			flow_flags;
};

struct ip_tunnel_encap {
	u16			type;
	u16			flags;
	__be16			sport;
	__be16			dport;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */
#define IP_TUNNEL_INFO_BRIDGE	0x04	/* represents a bridged tunnel id */
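
/* Illustrative sketch (editorial example, not part of the API): users of
 * the wrappers above declare a tunnel flag set and manipulate it with the
 * regular bitmap helpers, e.g.:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *
 *	__set_bit(IP_TUNNEL_KEY_BIT, flags);
 *	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
 *	if (!ip_tunnel_flags_empty(flags))
 *		...
 *
 * Each wrapper simply expands to the corresponding bitmap_*() operation
 * sized to __IP_TUNNEL_FLAG_NUM bits.
 */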

/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX					\
	GENMASK((sizeof_field(struct ip_tunnel_info,		\
			      options_len) * BITS_PER_BYTE) - 1, 0)

#define ip_tunnel_info_opts(info)				\
	_Generic(info,						\
		 const struct ip_tunnel_info * : ((const void *)((info) + 1)),\
		 struct ip_tunnel_info * : ((void *)((info) + 1))\
	)

struct ip_tunnel_info {
	struct ip_tunnel_key	key;
	struct ip_tunnel_encap	encap;
#ifdef CONFIG_DST_CACHE
	struct dst_cache	dst_cache;
#endif
	u8			options_len;
	u8			mode;
};

/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
	struct in6_addr		prefix;
	__be32			relay_prefix;
	u16			prefixlen;
	u16			relay_prefixlen;
};
#endif

struct ip_tunnel_prl_entry {
	struct ip_tunnel_prl_entry __rcu *next;
	__be32				addr;
	u16				flags;
	struct rcu_head			rcu_head;
};

struct metadata_dst;

/* Kernel-side variant of ip_tunnel_parm */
struct ip_tunnel_parm_kern {
	char			name[IFNAMSIZ];
	IP_TUNNEL_DECLARE_FLAGS(i_flags);
	IP_TUNNEL_DECLARE_FLAGS(o_flags);
	__be32			i_key;
	__be32			o_key;
	int			link;
	struct iphdr		iph;
};

struct ip_tunnel {
	struct ip_tunnel __rcu	*next;
	struct hlist_node	hash_node;

	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct net		*net;	/* netns for packet i/o */

	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived
					 */
	int		err_count;	/* Number of arrived ICMP errors */

	/* These three fields are used only by GRE */
	u32		i_seqno;	/* The last seen seqno	*/
	atomic_t	o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */

	/* These four fields are used only by ERSPAN */
	u32		index;		/* ERSPAN type II index */
	u8		erspan_ver;	/* ERSPAN version */
	u8		dir;		/* ERSPAN direction */
	u16		hwid;		/* ERSPAN hardware ID */

	struct dst_cache dst_cache;

	struct ip_tunnel_parm_kern parms;

	int		mlink;
	int		encap_hlen;	/* Encap header length (FOU, GUE) */
	int		hlen;		/* tun_hlen + encap_hlen */
	struct ip_tunnel_encap encap;

	/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
	unsigned int		prl_count;	/* # of entries in PRL */
	unsigned int		ip_tnl_net_id;
	struct gro_cells	gro_cells;
	__u32			fwmark;
	bool			collect_md;
	bool			ignore_df;
};

struct tnl_ptk_info {
	IP_TUNNEL_DECLARE_FLAGS(flags);
	__be16 proto;
	__be32 key;
	__be32 seq;
	int hdr_len;
};

#define PACKET_RCVD	0
#define PACKET_REJECT	1
#define PACKET_NEXT	2

#define IP_TNL_HASH_BITS   7
#define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)

struct ip_tunnel_net {
	struct net_device *fb_tunnel_dev;
	struct rtnl_link_ops *rtnl_link_ops;
	struct hlist_head tunnels[IP_TNL_HASH_SIZE];
	struct ip_tunnel __rcu *collect_md_tun;
	int type;
};
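
/* Illustrative sketch (editorial example): option blobs (GENEVE/VXLAN/
 * ERSPAN/GTP/PFCP) live immediately after struct ip_tunnel_info and are
 * reached through ip_tunnel_info_opts(); a receive-side consumer might do:
 *
 *	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 *
 *	if (info && info->options_len &&
 *	    test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags))
 *		parse_vxlan_opts(ip_tunnel_info_opts(info),
 *				 info->options_len);
 *
 * parse_vxlan_opts() is made up for the example; skb_tunnel_info() is
 * declared in net/dst_metadata.h.
 */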

static inline void ip_tunnel_set_options_present(unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

	ip_tunnel_flags_or(flags, flags, present);
}

static inline void ip_tunnel_clear_options_present(unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

	__ipt_flag_op(bitmap_andnot, flags, flags, present);
}

static inline bool ip_tunnel_is_options_present(const unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

	return ip_tunnel_flags_intersect(flags, present);
}

static inline void ip_tunnel_set_encflags_present(unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_CSUM_BIT, present);
	__set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, present);
	__set_bit(IP_TUNNEL_OAM_BIT, present);
	__set_bit(IP_TUNNEL_CRIT_OPT_BIT, present);

	ip_tunnel_flags_or(flags, flags, present);
}

static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(supp) = { };

	bitmap_set(supp, 0, BITS_PER_TYPE(__be16));
	__set_bit(IP_TUNNEL_VTI_BIT, supp);

	return ip_tunnel_flags_subset(flags, supp);
}

static inline void ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags)
{
	ip_tunnel_flags_zero(dst);

	bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16));
	__assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI);
}

static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags)
{
	__be16 ret;

	ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16)));
	if (test_bit(IP_TUNNEL_VTI_BIT, flags))
		ret |= VTI_ISVTI;

	return ret;
}

static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl, __be32 label,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id,
				      const unsigned long *tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->label = label;
	ip_tunnel_flags_copy(key->tun_flags, tun_flags);

	/* For the tunnel types on top of IPsec, the tp_src and tp_dst of
	 * the upper tunnel are used.
	 * E.g., for GRE over IPsec, tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;

	/* Clear struct padding. */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}

static inline bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
			   const struct ip_tunnel_info *info)
{
	if (skb->mark)
		return false;

	return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags);
}
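
/* Illustrative sketch (editorial example): a path that builds tunnel
 * metadata by hand might initialise the key roughly like this; saddr,
 * daddr, tos, ttl, dport and tun_id are placeholders:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *	struct ip_tunnel_key key;
 *
 *	__set_bit(IP_TUNNEL_KEY_BIT, flags);
 *	ip_tunnel_key_init(&key, saddr, daddr, tos, ttl, 0,
 *			   0, dport, tun_id, flags);
 *
 * The transport ports are only meaningful for UDP-based encapsulations;
 * other callers pass zero.
 */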

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
					       *tun_info)
{
	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

static inline __be64 key32_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)key;
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}

/* Returns the least-significant 32 bits of a __be64. */
static inline __be32 tunnel_id_to_key32(__be64 tun_id)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)tun_id;
#else
	return (__force __be32)((__force u64)tun_id >> 32);
#endif
}

#ifdef CONFIG_INET

static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
				       int proto,
				       __be32 daddr, __be32 saddr,
				       __be32 key, __u8 tos,
				       struct net *net, int oif,
				       __u32 mark, __u32 tun_inner_hash,
				       __u8 flow_flags)
{
	memset(fl4, 0, sizeof(*fl4));

	if (oif) {
		fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
		/* Legacy VRF/l3mdev use case */
		fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
	}

	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
	fl4->flowi4_mark = mark;
	fl4->flowi4_multipath_hash = tun_inner_hash;
	fl4->flowi4_flags = flow_flags;
}

int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname);

void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
			   struct rtnl_link_ops *ops,
			   struct list_head *dev_to_kill);

void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		       const u8 proto, int tunnel_hlen);
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
		  int cmd);
bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
			      const void __user *data);
bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			     void __user *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, const unsigned long *flags,
				   __be32 remote, __be32 local,
				   __be32 key);

void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm_kern *p, __u32 fwmark);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm_kern *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);

bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
				   struct ip_tunnel_encap *encap);

void ip_tunnel_netlink_parms(struct nlattr *data[],
			     struct ip_tunnel_parm_kern *parms);
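
/* Illustrative sketch (editorial example): an IPv4 tunnel output path
 * usually fills the flow key with ip_tunnel_init_flow() and then resolves
 * the route; simplified, with dst caching and error handling omitted:
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst,
 *			    key->u.ipv4.src, tunnel_id_to_key32(key->tun_id),
 *			    key->tos, dev_net(dev), 0, skb->mark,
 *			    skb_get_hash(skb), key->flow_flags);
 *	rt = ip_route_output_key(dev_net(dev), &fl4);
 *	if (IS_ERR(rt))
 *		goto tx_error;
 */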

extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);

struct ip_tunnel_encap_ops {
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4);
	int (*err_handler)(struct sk_buff *skb, u32 info);
};

#define MAX_IPTUN_ENCAP_OPS 8

extern const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS];

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap);
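
/* Illustrative sketch (editorial example): an encapsulation module
 * registers its ops in the iptun_encaps[] table under its TUNNEL_ENCAP_*
 * type; the handler names below are made up:
 *
 *	static const struct ip_tunnel_encap_ops my_encap_ops = {
 *		.encap_hlen	= my_encap_hlen,
 *		.build_header	= my_build_header,
 *		.err_handler	= my_err_handler,
 *	};
 *
 *	err = ip_tunnel_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 *
 * ip_encap_hlen() and ip_tunnel_encap() below then dispatch through this
 * table under RCU.
 */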

static inline bool pskb_inet_may_pull(struct sk_buff *skb)
{
	int nhlen;

	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		nhlen = sizeof(struct ipv6hdr);
		break;
#endif
	case htons(ETH_P_IP):
		nhlen = sizeof(struct iphdr);
		break;
	default:
		nhlen = 0;
	}

	return pskb_network_may_pull(skb, nhlen);
}

/* Variant of pskb_inet_may_pull() that also handles VLAN-tagged frames
 * and sets the network header offset.
 */
static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
					 bool inner_proto_inherit)
{
	int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
	__be16 type = skb->protocol;

	/* Essentially skb_protocol(skb, true), but we also get the
	 * MAC header length.
	 */
	if (eth_type_vlan(type))
		type = __vlan_get_protocol(skb, type, &maclen);

	switch (type) {
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		nhlen = sizeof(struct ipv6hdr);
		break;
#endif
	case htons(ETH_P_IP):
		nhlen = sizeof(struct iphdr);
		break;
	}
	/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
	 * a base network header in skb->head.
	 */
	if (!pskb_may_pull(skb, maclen + nhlen))
		return false;

	skb_set_network_header(skb, maclen);
	return true;
}

static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
	const struct ip_tunnel_encap_ops *ops;
	int hlen = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->encap_hlen))
		hlen = ops->encap_hlen(e);
	rcu_read_unlock();

	return hlen;
}

static inline int ip_tunnel_encap(struct sk_buff *skb,
				  struct ip_tunnel_encap *e,
				  u8 *protocol, struct flowi4 *fl4)
{
	const struct ip_tunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->build_header))
		ret = ops->build_header(skb, e, protocol, fl4);
	rcu_read_unlock();

	return ret;
}

/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (payload_protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

static inline __be32 ip_tunnel_get_flowlabel(const struct iphdr *iph,
					     const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IPV6))
		return ip6_flowlabel((const struct ipv6hdr *)iph);
	else
		return 0;
}

static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IP))
		return iph->ttl;
	else if (payload_protocol == htons(ETH_P_IPV6))
		return ((const struct ipv6hdr *)iph)->hop_limit;
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}

int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			   __be16 inner_proto, bool raw_proto, bool xnet);

static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
				       __be16 inner_proto, bool xnet)
{
	return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}

void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, u8 proto,
		   u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags);
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
			  int headroom, bool reply);

int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
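
/* Illustrative sketch (editorial example): transmit paths call
 * iptunnel_handle_offloads() before pushing the outer header so the
 * GSO/checksum state matches the chosen encapsulation, e.g. for a
 * UDP-based tunnel:
 *
 *	err = iptunnel_handle_offloads(skb, SKB_GSO_UDP_TUNNEL);
 *	if (err)
 *		goto tx_error;
 */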

static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}

static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
	if (pkt_len > 0) {
		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		u64_stats_add(&tstats->tx_bytes, pkt_len);
		u64_stats_inc(&tstats->tx_packets);
		u64_stats_update_end(&tstats->syncp);
		put_cpu_ptr(tstats);
		return;
	}

	if (pkt_len < 0) {
		DEV_STATS_INC(dev, tx_errors);
		DEV_STATS_INC(dev, tx_aborted_errors);
	} else {
		DEV_STATS_INC(dev, tx_dropped);
	}
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   const unsigned long *flags)
{
	info->options_len = len;
	if (len > 0) {
		memcpy(ip_tunnel_info_opts(info), from, len);
		ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags,
				   flags);
	}
}

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return (struct ip_tunnel_info *)lwtstate->data;
}

DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);

/* Returns > 0 if metadata should be collected */
static inline int ip_tunnel_collect_metadata(void)
{
	return static_branch_unlikely(&ip_tunnel_metadata_cnt);
}

void __init ip_tunnel_core_init(void);

void ip_tunnel_need_metadata(void);
void ip_tunnel_unneed_metadata(void);

#else /* CONFIG_INET */

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return NULL;
}

static inline void ip_tunnel_need_metadata(void)
{
}

static inline void ip_tunnel_unneed_metadata(void)
{
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   const unsigned long *flags)
{
	info->options_len = 0;
}

#endif /* CONFIG_INET */

#endif /* __NET_IP_TUNNELS_H */