/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>

#define IPV4_MAX_PMTU	65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU	68		/* RFC 791 */

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options */
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			tx_flags;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

#define IPCB(skb) ((struct inet_skb_parm *)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

/* Special input handler for packets caught by the router alert option.
   They are selected only by protocol field, and then processed like
   local ones; but only if someone wants them!  Otherwise, a router
   not running rsvpd will kill RSVP.

   What user level does with them is its own problem.
   I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
   but the receiver should be clever enough, e.g., to forward mtrace requests
   sent to a multicast group to reach the destination designated router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void		(*destructor)(struct sock *);
		struct sock	*saved_sk;
	};
	struct rcu_head		rcu;
};
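/* Illustrative sketch (an assumption about typical usage, not taken from this
 * header): entries join and leave this chain via ip_ra_control(), declared
 * further below, which is what the IP_ROUTER_ALERT socket option ends up
 * calling for a raw socket.  Roughly, for an RSVP daemon:
 *
 *	int on = 1;
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *	setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 *
 * Packets carrying the router alert option are then handed to such sockets
 * by ip_call_ra_chain().
 */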
/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime */

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);
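/* Rough sketch of how the "corking" transmit helpers above fit together,
 * assuming a datagram-style sender such as UDP (hedged; see udp_sendmsg()
 * for the actual flow):
 *
 *	- ip_append_data(), possibly called repeatedly while the socket is
 *	  corked, copies caller data into skbs queued on sk->sk_write_queue;
 *	- ip_push_pending_frames() turns that queue into a packet via
 *	  __ip_make_skb() and hands it to ip_send_skb();
 *	- ip_flush_pending_frames() discards the queue on error;
 *	- ip_make_skb() is the one-shot variant that builds the skb on a
 *	  private queue without touching the socket's pending frames.
 */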
static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
				struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie *ipc, struct sock *sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset;	/* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val)	__SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}
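/* Usage sketch for the SNMP/MIB counter macros above (assuming a struct net
 * obtained from the receive path, e.g. dev_net(skb->dev)): the IPv4 input
 * path bumps per-netns counters with calls such as
 *
 *	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 *
 * The double-underscore variants are the cheaper forms intended for callers
 * that already run in a context (e.g. softirq) where the per-cpu update
 * needs no extra protection; the plain variants are safe more generally.
 */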
void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return 0;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return net->ipv4.sysctl_ip_prot_sock;
}

#else
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	return 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK (000625)
 */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;

	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	return --iph->ttl;
}
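/* Note on ip_decrease_ttl() above: the checksum is patched incrementally
 * rather than recomputed.  TTL sits in the high byte of a 16-bit header
 * word, so decrementing it lowers the header sum by 0x0100 and therefore
 * raises the (complemented) checksum field by the same amount; the
 * "+ (check >= 0xFFFF)" term folds the end-around carry back in and keeps
 * the stored checksum from ending up as 0xFFFF (the check == 0xFEFF case
 * the comment refers to).  For example, a stored checksum of 0x1234 simply
 * becomes 0x1334.
 */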
static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);

	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
	    ip_mtu_locked(dst) ||
	    !forwarding)
		return dst_mtu(dst);

	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}

int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
		       u32 *metrics);

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* This is only to work around buggy Windows95/2000
		 * VJ compression implementations.  If the ID field
		 * does not change, they drop every other packet in
		 * a TCP stream using header compression.
		 */
		if (sk && inet_sk(sk)->inet_daddr) {
			iph->id = htons(inet_sk(sk)->inet_id);
			inet_sk(sk)->inet_id += segs;
		} else {
			iph->id = 0;
		}
	} else {
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
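/* Worked example for ip_eth_mc_map(): only the low 23 bits of the group
 * address survive, behind the fixed 01:00:5e prefix, so 224.0.0.251 (mDNS)
 * maps to 01:00:5e:00:00:fb -- and so do 225.0.0.251 and 224.128.0.251,
 * which is the well-known 32:1 ambiguity of the IPv4 multicast MAC mapping.
 */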
/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];	/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr  >>= 8;
	buf[18] = addr & 0xff;
	addr  >>= 8;
	buf[17] = addr & 0xff;
	addr  >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};
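/* The *_END markers above reserve a USHRT_MAX-wide window after each
 * conntrack user so callers can fold extra state into the defrag "user"
 * value -- netfilter defragmentation, for instance, passes something along
 * the lines of IP_DEFRAG_CONNTRACK_IN + zone_id, which the range helper
 * below then matches against the whole window.
 */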
/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   unsigned char *data, int optlen);
int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
			     unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen);
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
				struct netlink_ext_ack *extack);

#endif	/* _IP_H */