/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_IP_TUNNELS_H
#define __NET_IP_TUNNELS_H 1

#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <linux/bitops.h>

#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
#include <net/dst_cache.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO	(30*HZ)

/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
	(sizeof_field(struct ip_tunnel_key, u) -		\
	 sizeof_field(struct ip_tunnel_key, u.ipv4))

#define __ipt_flag_op(op, ...)					\
	op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM)

#define IP_TUNNEL_DECLARE_FLAGS(...)				\
	__ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__)

#define ip_tunnel_flags_zero(...)	__ipt_flag_op(bitmap_zero, __VA_ARGS__)
#define ip_tunnel_flags_copy(...)	__ipt_flag_op(bitmap_copy, __VA_ARGS__)
#define ip_tunnel_flags_and(...)	__ipt_flag_op(bitmap_and, __VA_ARGS__)
#define ip_tunnel_flags_or(...)		__ipt_flag_op(bitmap_or, __VA_ARGS__)

#define ip_tunnel_flags_empty(...)				\
	__ipt_flag_op(bitmap_empty, __VA_ARGS__)
#define ip_tunnel_flags_intersect(...)				\
	__ipt_flag_op(bitmap_intersects, __VA_ARGS__)
#define ip_tunnel_flags_subset(...)				\
	__ipt_flag_op(bitmap_subset, __VA_ARGS__)

struct ip_tunnel_key {
	__be64			tun_id;
	union {
		struct {
			__be32	src;
			__be32	dst;
		} ipv4;
		struct {
			struct in6_addr src;
			struct in6_addr dst;
		} ipv6;
	} u;
	IP_TUNNEL_DECLARE_FLAGS(tun_flags);
	__be32			label;		/* Flow Label for IPv6 */
	u32			nhid;
	u8			tos;		/* TOS for IPv4, TC for IPv6 */
	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
	__be16			tp_src;
	__be16			tp_dst;
	__u8			flow_flags;
};

struct ip_tunnel_encap {
	u16			type;
	u16			flags;
	__be16			sport;
	__be16			dport;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */
#define IP_TUNNEL_INFO_BRIDGE	0x04	/* represents a bridged tunnel id */
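
/* Illustrative sketch only (not part of the API above): tunnel flags are a
 * bitmap, so callers use the wrappers defined earlier in this header rather
 * than raw __be16 masks.  A caller wanting "key + checksum" might do:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *
 *	__set_bit(IP_TUNNEL_KEY_BIT, flags);
 *	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
 *	if (test_bit(IP_TUNNEL_KEY_BIT, flags))
 *		;	// build a keyed header, etc.
 *
 * IP_TUNNEL_KEY_BIT and IP_TUNNEL_CSUM_BIT come from the same UAPI flag enum
 * as __IP_TUNNEL_FLAG_NUM (via <linux/if_tunnel.h>); the snippet itself is
 * only an example.
 */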

/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX					\
	GENMASK((sizeof_field(struct ip_tunnel_info,		\
			      options_len) * BITS_PER_BYTE) - 1, 0)

#define ip_tunnel_info_opts(info)				\
	_Generic(info,						\
		 const struct ip_tunnel_info * : ((const void *)((info) + 1)),\
		 struct ip_tunnel_info * : ((void *)((info) + 1))\
	)

struct ip_tunnel_info {
	struct ip_tunnel_key	key;
	struct ip_tunnel_encap	encap;
#ifdef CONFIG_DST_CACHE
	struct dst_cache	dst_cache;
#endif
	u8			options_len;
	u8			mode;
};

/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
	struct in6_addr		prefix;
	__be32			relay_prefix;
	u16			prefixlen;
	u16			relay_prefixlen;
};
#endif

struct ip_tunnel_prl_entry {
	struct ip_tunnel_prl_entry __rcu *next;
	__be32				addr;
	u16				flags;
	struct rcu_head			rcu_head;
};

struct metadata_dst;

/* Kernel-side variant of ip_tunnel_parm */
struct ip_tunnel_parm_kern {
	char			name[IFNAMSIZ];
	IP_TUNNEL_DECLARE_FLAGS(i_flags);
	IP_TUNNEL_DECLARE_FLAGS(o_flags);
	__be32			i_key;
	__be32			o_key;
	int			link;
	struct iphdr		iph;
};

struct ip_tunnel {
	struct ip_tunnel __rcu	*next;
	struct hlist_node	hash_node;

	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct net		*net;	/* netns for packet i/o */

	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived */
	int		err_count;	/* Number of arrived ICMP errors */

	/* These fields are used only by GRE */
	u32		i_seqno;	/* The last seen seqno */
	atomic_t	o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */

	/* These four fields are used only by ERSPAN */
	u32		index;		/* ERSPAN type II index */
	u8		erspan_ver;	/* ERSPAN version */
	u8		dir;		/* ERSPAN direction */
	u16		hwid;		/* ERSPAN hardware ID */

	struct dst_cache dst_cache;

	struct ip_tunnel_parm_kern parms;

	int		mlink;
	int		encap_hlen;	/* Encap header length (FOU,GUE) */
	int		hlen;		/* tun_hlen + encap_hlen */
	struct ip_tunnel_encap encap;

	/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
	unsigned int		prl_count;	/* # of entries in PRL */
	unsigned int		ip_tnl_net_id;
	struct gro_cells	gro_cells;
	__u32			fwmark;
	bool			collect_md;
	bool			ignore_df;
};

struct tnl_ptk_info {
	IP_TUNNEL_DECLARE_FLAGS(flags);
	__be16 proto;
	__be32 key;
	__be32 seq;
	int hdr_len;
};

#define PACKET_RCVD	0
#define PACKET_REJECT	1
#define PACKET_NEXT	2

#define IP_TNL_HASH_BITS	7
#define IP_TNL_HASH_SIZE	(1 << IP_TNL_HASH_BITS)

struct ip_tunnel_net {
	struct net_device *fb_tunnel_dev;
	struct rtnl_link_ops *rtnl_link_ops;
	struct hlist_head tunnels[IP_TNL_HASH_SIZE];
	struct ip_tunnel __rcu *collect_md_tun;
	int type;
};

static inline void ip_tunnel_set_options_present(unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);

	ip_tunnel_flags_or(flags, flags, present);
}
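
/* Illustrative sketch only: ip_tunnel_set_options_present() above and the
 * clear/test helpers below treat the per-type option bits (GENEVE, VXLAN,
 * ERSPAN, GTP) as one "options present" group.  Starting from a zeroed
 * bitmap, the expected behaviour is:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *
 *	ip_tunnel_set_options_present(flags);
 *	WARN_ON(!ip_tunnel_is_options_present(flags));
 *	ip_tunnel_clear_options_present(flags);
 *	WARN_ON(!ip_tunnel_flags_empty(flags));
 *
 * The WARN_ON()s only spell out the expected state; real callers usually
 * pass the relevant *_OPT bit to ip_tunnel_info_opts_set(), declared later
 * in this header.
 */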
static inline void ip_tunnel_clear_options_present(unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);

	__ipt_flag_op(bitmap_andnot, flags, flags, present);
}

static inline bool ip_tunnel_is_options_present(const unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(present) = { };

	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
	__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);

	return ip_tunnel_flags_intersect(flags, present);
}

static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags)
{
	IP_TUNNEL_DECLARE_FLAGS(supp) = { };

	bitmap_set(supp, 0, BITS_PER_TYPE(__be16));
	__set_bit(IP_TUNNEL_VTI_BIT, supp);

	return ip_tunnel_flags_subset(flags, supp);
}

static inline void ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags)
{
	ip_tunnel_flags_zero(dst);

	bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16));
	__assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI);
}

static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags)
{
	__be16 ret;

	ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16)));
	if (test_bit(IP_TUNNEL_VTI_BIT, flags))
		ret |= VTI_ISVTI;

	return ret;
}

static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl, __be32 label,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id,
				      const unsigned long *tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->label = label;
	ip_tunnel_flags_copy(key->tun_flags, tun_flags);

	/* For tunnel types on top of IPsec, the tp_src and tp_dst of the
	 * upper tunnel are used.
	 * E.g. GRE over IPsec, where tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;

	/* Clear struct padding. */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}

static inline bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
			   const struct ip_tunnel_info *info)
{
	if (skb->mark)
		return false;

	return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags);
}

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
					       *tun_info)
{
	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

static inline __be64 key32_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)key;
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}
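
/* Worked example (illustrative): the 32-bit key occupies the least
 * significant 32 bits of the 64-bit tunnel ID when both are read in network
 * byte order, so key32_to_tunnel_id() above and tunnel_id_to_key32() below
 * round-trip on either endianness:
 *
 *	__be32 key = htonl(42);
 *	__be64 id  = key32_to_tunnel_id(key);
 *
 * After this, be64_to_cpu(id) == 42 and tunnel_id_to_key32(id) == key.
 */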
/* Returns the least-significant 32 bits of a __be64. */
static inline __be32 tunnel_id_to_key32(__be64 tun_id)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)tun_id;
#else
	return (__force __be32)((__force u64)tun_id >> 32);
#endif
}

#ifdef CONFIG_INET

static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
				       int proto,
				       __be32 daddr, __be32 saddr,
				       __be32 key, __u8 tos,
				       struct net *net, int oif,
				       __u32 mark, __u32 tun_inner_hash,
				       __u8 flow_flags)
{
	memset(fl4, 0, sizeof(*fl4));

	if (oif) {
		fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
		/* Legacy VRF/l3mdev use case */
		fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
	}

	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = proto;
	fl4->fl4_gre_key = key;
	fl4->flowi4_mark = mark;
	fl4->flowi4_multipath_hash = tun_inner_hash;
	fl4->flowi4_flags = flow_flags;
}

int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname);

void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
			   struct rtnl_link_ops *ops,
			   struct list_head *dev_to_kill);

void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		       const u8 proto, int tunnel_hlen);
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
		  int cmd);
bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
			      const void __user *data);
bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			     void __user *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, const unsigned long *flags,
				   __be32 remote, __be32 local,
				   __be32 key);

void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm_kern *p, __u32 fwmark);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm_kern *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);

bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
				   struct ip_tunnel_encap *encap);

void ip_tunnel_netlink_parms(struct nlattr *data[],
			     struct ip_tunnel_parm_kern *parms);

extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
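
/* Illustrative sketch (placeholders flagged below): a tunnel transmit path
 * typically fills a flowi4 with ip_tunnel_init_flow() and then resolves the
 * route, roughly:
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst,
 *			    key->u.ipv4.src, tunnel_id_to_key32(key->tun_id),
 *			    key->tos, dev_net(dev), 0, skb->mark,
 *			    skb_get_hash(skb), key->flow_flags);
 *	rt = ip_route_output_key(dev_net(dev), &fl4);
 *	if (IS_ERR(rt))
 *		goto tx_error;
 *
 * "key", "dev", "skb" and the tx_error label stand in for the caller's
 * context; real users (e.g. GRE) layer dst caching on top of this.
 */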
struct ip_tunnel_encap_ops {
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4);
	int (*err_handler)(struct sk_buff *skb, u32 info);
};

#define MAX_IPTUN_ENCAP_OPS 8

extern const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS];

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap);

static inline bool pskb_inet_may_pull(struct sk_buff *skb)
{
	int nhlen;

	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		nhlen = sizeof(struct ipv6hdr);
		break;
#endif
	case htons(ETH_P_IP):
		nhlen = sizeof(struct iphdr);
		break;
	default:
		nhlen = 0;
	}

	return pskb_network_may_pull(skb, nhlen);
}

static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
	const struct ip_tunnel_encap_ops *ops;
	int hlen = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->encap_hlen))
		hlen = ops->encap_hlen(e);
	rcu_read_unlock();

	return hlen;
}

static inline int ip_tunnel_encap(struct sk_buff *skb,
				  struct ip_tunnel_encap *e,
				  u8 *protocol, struct flowi4 *fl4)
{
	const struct ip_tunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->build_header))
		ret = ops->build_header(skb, e, protocol, fl4);
	rcu_read_unlock();

	return ret;
}

/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (payload_protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

static inline __be32 ip_tunnel_get_flowlabel(const struct iphdr *iph,
					     const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IPV6))
		return ip6_flowlabel((const struct ipv6hdr *)iph);
	else
		return 0;
}

static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	__be16 payload_protocol = skb_protocol(skb, true);

	if (payload_protocol == htons(ETH_P_IP))
		return iph->ttl;
	else if (payload_protocol == htons(ETH_P_IPV6))
		return ((const struct ipv6hdr *)iph)->hop_limit;
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}

int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			   __be16 inner_proto, bool raw_proto, bool xnet);

static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
				       __be16 inner_proto, bool xnet)
{
	return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}
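
/* Illustrative sketch: on encapsulation the outer TOS and TTL are usually
 * derived from the inner headers with the helpers above, e.g. for an IPv4
 * outer header built around an inner packet "skb" whose inner IP header is
 * "inner_iph":
 *
 *	u8 tos = ip_tunnel_ecn_encap(key->tos, inner_iph, skb);
 *	u8 ttl = key->ttl ? : ip_tunnel_get_ttl(inner_iph, skb);
 *
 * "key", "inner_iph" and "skb" are placeholders for the caller's context;
 * drivers such as ipip/GRE follow this pattern before calling
 * iptunnel_xmit(), declared just below.
 */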
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, u8 proto,
		   u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags);
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
			  int headroom, bool reply);

int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);

static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}

static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
	if (pkt_len > 0) {
		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		u64_stats_add(&tstats->tx_bytes, pkt_len);
		u64_stats_inc(&tstats->tx_packets);
		u64_stats_update_end(&tstats->syncp);
		put_cpu_ptr(tstats);
		return;
	}

	if (pkt_len < 0) {
		DEV_STATS_INC(dev, tx_errors);
		DEV_STATS_INC(dev, tx_aborted_errors);
	} else {
		DEV_STATS_INC(dev, tx_dropped);
	}
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   const unsigned long *flags)
{
	info->options_len = len;
	if (len > 0) {
		memcpy(ip_tunnel_info_opts(info), from, len);
		ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags,
				   flags);
	}
}

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return (struct ip_tunnel_info *)lwtstate->data;
}

DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);

/* Returns > 0 if metadata should be collected */
static inline int ip_tunnel_collect_metadata(void)
{
	return static_branch_unlikely(&ip_tunnel_metadata_cnt);
}

void __init ip_tunnel_core_init(void);

void ip_tunnel_need_metadata(void);
void ip_tunnel_unneed_metadata(void);

#else /* CONFIG_INET */

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return NULL;
}

static inline void ip_tunnel_need_metadata(void)
{
}

static inline void ip_tunnel_unneed_metadata(void)
{
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len,
					   const unsigned long *flags)
{
	info->options_len = 0;
}

#endif /* CONFIG_INET */

#endif /* __NET_IP_TUNNELS_H */