1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * The Internet Protocol (IP) output module. 8 * 9 * Authors: Ross Biro 10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Donald Becker, <becker@super.org> 12 * Alan Cox, <Alan.Cox@linux.org> 13 * Richard Underwood 14 * Stefan Becker, <stefanb@yello.ping.de> 15 * Jorge Cwik, <jorge@laser.satlink.net> 16 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 17 * Hirokazu Takahashi, <taka@valinux.co.jp> 18 * 19 * See ip_input.c for original log 20 * 21 * Fixes: 22 * Alan Cox : Missing nonblock feature in ip_build_xmit. 23 * Mike Kilburn : htons() missing in ip_build_xmit. 24 * Bradford Johnson: Fix faulty handling of some frames when 25 * no route is found. 26 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit 27 * (in case if packet not accepted by 28 * output firewall rules) 29 * Mike McLagan : Routing by source 30 * Alexey Kuznetsov: use new route cache 31 * Andi Kleen: Fix broken PMTU recovery and remove 32 * some redundant tests. 33 * Vitaly E. Lavrov : Transparent proxy revived after year coma. 34 * Andi Kleen : Replace ip_reply with ip_send_reply. 35 * Andi Kleen : Split fast and slow ip_build_xmit path 36 * for decreased register pressure on x86 37 * and more readability. 38 * Marc Boucher : When call_out_firewall returns FW_QUEUE, 39 * silently drop skb instead of failing with -EPERM. 40 * Detlev Wengorz : Copy protocol for fragments. 41 * Hirokazu Takahashi: HW checksumming for outgoing UDP 42 * datagrams. 43 * Hirokazu Takahashi: sendfile() on UDP works now. 44 */ 45 46 #include <linux/uaccess.h> 47 #include <linux/module.h> 48 #include <linux/types.h> 49 #include <linux/kernel.h> 50 #include <linux/mm.h> 51 #include <linux/string.h> 52 #include <linux/errno.h> 53 #include <linux/highmem.h> 54 #include <linux/slab.h> 55 56 #include <linux/socket.h> 57 #include <linux/sockios.h> 58 #include <linux/in.h> 59 #include <linux/inet.h> 60 #include <linux/netdevice.h> 61 #include <linux/etherdevice.h> 62 #include <linux/proc_fs.h> 63 #include <linux/stat.h> 64 #include <linux/init.h> 65 66 #include <net/flow.h> 67 #include <net/snmp.h> 68 #include <net/ip.h> 69 #include <net/protocol.h> 70 #include <net/route.h> 71 #include <net/xfrm.h> 72 #include <linux/skbuff.h> 73 #include <net/sock.h> 74 #include <net/arp.h> 75 #include <net/icmp.h> 76 #include <net/checksum.h> 77 #include <net/gso.h> 78 #include <net/inetpeer.h> 79 #include <net/lwtunnel.h> 80 #include <net/inet_dscp.h> 81 #include <linux/bpf-cgroup.h> 82 #include <linux/igmp.h> 83 #include <linux/netfilter_ipv4.h> 84 #include <linux/netfilter_bridge.h> 85 #include <linux/netlink.h> 86 #include <linux/tcp.h> 87 #include <net/psp.h> 88 89 static int 90 ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, 91 unsigned int mtu, 92 int (*output)(struct net *, struct sock *, struct sk_buff *)); 93 94 /* Generate a checksum for an outgoing IP datagram. */ 95 void ip_send_check(struct iphdr *iph) 96 { 97 iph->check = 0; 98 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 99 } 100 EXPORT_SYMBOL(ip_send_check); 101 102 int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) 103 { 104 struct iphdr *iph = ip_hdr(skb); 105 106 IP_INC_STATS(net, IPSTATS_MIB_OUTREQUESTS); 107 108 iph_set_totlen(iph, skb->len); 109 ip_send_check(iph); 110 111 /* if egress device is enslaved to an L3 master device pass the 112 * skb to its handler for processing 113 */ 114 skb = l3mdev_ip_out(sk, skb); 115 if (unlikely(!skb)) 116 return 0; 117 118 skb->protocol = htons(ETH_P_IP); 119 120 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, 121 net, sk, skb, NULL, skb_dst_dev(skb), 122 dst_output); 123 } 124 125 int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) 126 { 127 int err; 128 129 err = __ip_local_out(net, sk, skb); 130 if (likely(err == 1)) 131 err = dst_output(net, sk, skb); 132 133 return err; 134 } 135 EXPORT_SYMBOL_GPL(ip_local_out); 136 137 static inline int ip_select_ttl(const struct inet_sock *inet, 138 const struct dst_entry *dst) 139 { 140 int ttl = READ_ONCE(inet->uc_ttl); 141 142 if (ttl < 0) 143 ttl = ip4_dst_hoplimit(dst); 144 return ttl; 145 } 146 147 /* 148 * Add an ip header to a skbuff and send it out. 149 * 150 */ 151 int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, 152 __be32 saddr, __be32 daddr, struct ip_options_rcu *opt, 153 u8 tos) 154 { 155 const struct inet_sock *inet = inet_sk(sk); 156 struct rtable *rt = skb_rtable(skb); 157 struct net *net = sock_net(sk); 158 struct iphdr *iph; 159 160 /* Build the IP header. */ 161 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0)); 162 skb_reset_network_header(skb); 163 iph = ip_hdr(skb); 164 iph->version = 4; 165 iph->ihl = 5; 166 iph->tos = tos; 167 iph->ttl = ip_select_ttl(inet, &rt->dst); 168 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); 169 iph->saddr = saddr; 170 iph->protocol = sk->sk_protocol; 171 /* Do not bother generating IPID for small packets (eg SYNACK) */ 172 if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) { 173 iph->frag_off = htons(IP_DF); 174 iph->id = 0; 175 } else { 176 iph->frag_off = 0; 177 /* TCP packets here are SYNACK with fat IPv4/TCP options. 178 * Avoid using the hashed IP ident generator. 179 */ 180 if (sk->sk_protocol == IPPROTO_TCP) 181 iph->id = (__force __be16)get_random_u16(); 182 else 183 __ip_select_ident(net, iph, 1); 184 } 185 186 if (opt && opt->opt.optlen) { 187 iph->ihl += opt->opt.optlen>>2; 188 ip_options_build(skb, &opt->opt, daddr, rt); 189 } 190 191 skb->priority = READ_ONCE(sk->sk_priority); 192 if (!skb->mark) 193 skb->mark = READ_ONCE(sk->sk_mark); 194 195 /* Send it out. */ 196 return ip_local_out(net, skb->sk, skb); 197 } 198 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt); 199 200 static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb) 201 { 202 struct dst_entry *dst = skb_dst(skb); 203 struct rtable *rt = dst_rtable(dst); 204 struct net_device *dev = dst_dev(dst); 205 unsigned int hh_len = LL_RESERVED_SPACE(dev); 206 struct neighbour *neigh; 207 bool is_v6gw = false; 208 209 if (rt->rt_type == RTN_MULTICAST) { 210 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len); 211 } else if (rt->rt_type == RTN_BROADCAST) 212 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len); 213 214 /* OUTOCTETS should be counted after fragment */ 215 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); 216 217 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 218 skb = skb_expand_head(skb, hh_len); 219 if (!skb) 220 return -ENOMEM; 221 } 222 223 if (lwtunnel_xmit_redirect(dst->lwtstate)) { 224 int res = lwtunnel_xmit(skb); 225 226 if (res != LWTUNNEL_XMIT_CONTINUE) 227 return res; 228 } 229 230 rcu_read_lock(); 231 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); 232 if (!IS_ERR(neigh)) { 233 int res; 234 235 sock_confirm_neigh(skb, neigh); 236 /* if crossing protocols, can not use the cached header */ 237 res = neigh_output(neigh, skb, is_v6gw); 238 rcu_read_unlock(); 239 return res; 240 } 241 rcu_read_unlock(); 242 243 net_dbg_ratelimited("%s: No header cache and no neighbour!\n", 244 __func__); 245 kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL); 246 return PTR_ERR(neigh); 247 } 248 249 static int ip_finish_output_gso(struct net *net, struct sock *sk, 250 struct sk_buff *skb, unsigned int mtu) 251 { 252 struct sk_buff *segs, *nskb; 253 netdev_features_t features; 254 int ret = 0; 255 256 /* common case: seglen is <= mtu 257 */ 258 if (skb_gso_validate_network_len(skb, mtu)) 259 return ip_finish_output2(net, sk, skb); 260 261 /* Slowpath - GSO segment length exceeds the egress MTU. 262 * 263 * This can happen in several cases: 264 * - Forwarding of a TCP GRO skb, when DF flag is not set. 265 * - Forwarding of an skb that arrived on a virtualization interface 266 * (virtio-net/vhost/tap) with TSO/GSO size set by other network 267 * stack. 268 * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an 269 * interface with a smaller MTU. 270 * - Arriving GRO skb (or GSO skb in a virtualized environment) that is 271 * bridged to a NETIF_F_TSO tunnel stacked over an interface with an 272 * insufficient MTU. 273 */ 274 features = netif_skb_features(skb); 275 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET); 276 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 277 if (IS_ERR_OR_NULL(segs)) { 278 kfree_skb(skb); 279 return -ENOMEM; 280 } 281 282 consume_skb(skb); 283 284 skb_list_walk_safe(segs, segs, nskb) { 285 int err; 286 287 skb_mark_not_on_list(segs); 288 err = ip_fragment(net, sk, segs, mtu, ip_finish_output2); 289 290 if (err && ret == 0) 291 ret = err; 292 } 293 294 return ret; 295 } 296 297 static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) 298 { 299 unsigned int mtu; 300 301 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 302 /* Policy lookup after SNAT yielded a new policy */ 303 if (skb_dst(skb)->xfrm) { 304 IPCB(skb)->flags |= IPSKB_REROUTED; 305 return dst_output(net, sk, skb); 306 } 307 #endif 308 mtu = ip_skb_dst_mtu(sk, skb); 309 if (skb_is_gso(skb)) 310 return ip_finish_output_gso(net, sk, skb, mtu); 311 312 if (skb->len > mtu || IPCB(skb)->frag_max_size) 313 return ip_fragment(net, sk, skb, mtu, ip_finish_output2); 314 315 return ip_finish_output2(net, sk, skb); 316 } 317 318 static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) 319 { 320 int ret; 321 322 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); 323 switch (ret) { 324 case NET_XMIT_SUCCESS: 325 return __ip_finish_output(net, sk, skb); 326 case NET_XMIT_CN: 327 return __ip_finish_output(net, sk, skb) ? : ret; 328 default: 329 kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS); 330 return ret; 331 } 332 } 333 334 static int ip_mc_finish_output(struct net *net, struct sock *sk, 335 struct sk_buff *skb) 336 { 337 struct rtable *new_rt; 338 bool do_cn = false; 339 int ret, err; 340 341 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); 342 switch (ret) { 343 case NET_XMIT_CN: 344 do_cn = true; 345 fallthrough; 346 case NET_XMIT_SUCCESS: 347 break; 348 default: 349 kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS); 350 return ret; 351 } 352 353 /* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting 354 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten, 355 * see ipv4_pktinfo_prepare(). 356 */ 357 new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb)); 358 if (new_rt) { 359 new_rt->rt_iif = 0; 360 skb_dst_drop(skb); 361 skb_dst_set(skb, &new_rt->dst); 362 } 363 364 err = dev_loopback_xmit(net, sk, skb); 365 return (do_cn && err) ? ret : err; 366 } 367 368 int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb) 369 { 370 struct rtable *rt = skb_rtable(skb); 371 struct net_device *dev = rt->dst.dev; 372 373 /* 374 * If the indicated interface is up and running, send the packet. 375 */ 376 skb->dev = dev; 377 skb->protocol = htons(ETH_P_IP); 378 379 /* 380 * Multicasts are looped back for other local users 381 */ 382 383 if (rt->rt_flags&RTCF_MULTICAST) { 384 if (sk_mc_loop(sk) 385 #ifdef CONFIG_IP_MROUTE 386 /* Small optimization: do not loopback not local frames, 387 which returned after forwarding; they will be dropped 388 by ip_mr_input in any case. 389 Note, that local frames are looped back to be delivered 390 to local recipients. 391 392 This check is duplicated in ip_mr_input at the moment. 393 */ 394 && 395 ((rt->rt_flags & RTCF_LOCAL) || 396 !(IPCB(skb)->flags & IPSKB_FORWARDED)) 397 #endif 398 ) { 399 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 400 if (newskb) 401 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, 402 net, sk, newskb, NULL, newskb->dev, 403 ip_mc_finish_output); 404 } 405 406 /* Multicasts with ttl 0 must not go beyond the host */ 407 408 if (ip_hdr(skb)->ttl == 0) { 409 kfree_skb(skb); 410 return 0; 411 } 412 } 413 414 if (rt->rt_flags&RTCF_BROADCAST) { 415 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 416 if (newskb) 417 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, 418 net, sk, newskb, NULL, newskb->dev, 419 ip_mc_finish_output); 420 } 421 422 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, 423 net, sk, skb, NULL, skb->dev, 424 ip_finish_output, 425 !(IPCB(skb)->flags & IPSKB_REROUTED)); 426 } 427 428 int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb) 429 { 430 struct net_device *dev, *indev = skb->dev; 431 int ret_val; 432 433 rcu_read_lock(); 434 dev = skb_dst_dev_rcu(skb); 435 skb->dev = dev; 436 skb->protocol = htons(ETH_P_IP); 437 438 ret_val = NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, 439 net, sk, skb, indev, dev, 440 ip_finish_output, 441 !(IPCB(skb)->flags & IPSKB_REROUTED)); 442 rcu_read_unlock(); 443 return ret_val; 444 } 445 EXPORT_SYMBOL(ip_output); 446 447 /* 448 * copy saddr and daddr, possibly using 64bit load/stores 449 * Equivalent to : 450 * iph->saddr = fl4->saddr; 451 * iph->daddr = fl4->daddr; 452 */ 453 static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4) 454 { 455 BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) != 456 offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr)); 457 458 iph->saddr = fl4->saddr; 459 iph->daddr = fl4->daddr; 460 } 461 462 /* Note: skb->sk can be different from sk, in case of tunnels */ 463 int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, 464 __u8 tos) 465 { 466 struct inet_sock *inet = inet_sk(sk); 467 struct net *net = sock_net(sk); 468 struct ip_options_rcu *inet_opt; 469 struct flowi4 *fl4; 470 struct rtable *rt; 471 struct iphdr *iph; 472 int res; 473 474 /* Skip all of this if the packet is already routed, 475 * f.e. by something like SCTP. 476 */ 477 rcu_read_lock(); 478 inet_opt = rcu_dereference(inet->inet_opt); 479 fl4 = &fl->u.ip4; 480 rt = skb_rtable(skb); 481 if (rt) 482 goto packet_routed; 483 484 /* Make sure we can route this packet. */ 485 rt = dst_rtable(__sk_dst_check(sk, 0)); 486 if (!rt) { 487 inet_sk_init_flowi4(inet, fl4); 488 489 /* sctp_v4_xmit() uses its own DSCP value */ 490 fl4->flowi4_dscp = inet_dsfield_to_dscp(tos); 491 492 /* If this fails, retransmit mechanism of transport layer will 493 * keep trying until route appears or the connection times 494 * itself out. 495 */ 496 rt = ip_route_output_flow(net, fl4, sk); 497 if (IS_ERR(rt)) 498 goto no_route; 499 sk_setup_caps(sk, &rt->dst); 500 } 501 skb_dst_set_noref(skb, &rt->dst); 502 503 packet_routed: 504 if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway) 505 goto no_route; 506 507 /* OK, we know where to send it, allocate and build IP header. */ 508 skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0)); 509 skb_reset_network_header(skb); 510 iph = ip_hdr(skb); 511 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff)); 512 if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df) 513 iph->frag_off = htons(IP_DF); 514 else 515 iph->frag_off = 0; 516 iph->ttl = ip_select_ttl(inet, &rt->dst); 517 iph->protocol = sk->sk_protocol; 518 ip_copy_addrs(iph, fl4); 519 520 /* Transport layer set skb->h.foo itself. */ 521 522 if (inet_opt && inet_opt->opt.optlen) { 523 iph->ihl += inet_opt->opt.optlen >> 2; 524 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt); 525 } 526 527 ip_select_ident_segs(net, skb, sk, 528 skb_shinfo(skb)->gso_segs ?: 1); 529 530 /* TODO : should we use skb->sk here instead of sk ? */ 531 skb->priority = READ_ONCE(sk->sk_priority); 532 skb->mark = READ_ONCE(sk->sk_mark); 533 534 res = ip_local_out(net, sk, skb); 535 rcu_read_unlock(); 536 return res; 537 538 no_route: 539 rcu_read_unlock(); 540 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 541 kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES); 542 return -EHOSTUNREACH; 543 } 544 EXPORT_SYMBOL(__ip_queue_xmit); 545 546 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) 547 { 548 return __ip_queue_xmit(sk, skb, fl, READ_ONCE(inet_sk(sk)->tos)); 549 } 550 EXPORT_SYMBOL(ip_queue_xmit); 551 552 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) 553 { 554 to->pkt_type = from->pkt_type; 555 to->priority = from->priority; 556 to->protocol = from->protocol; 557 to->skb_iif = from->skb_iif; 558 skb_dst_drop(to); 559 skb_dst_copy(to, from); 560 to->dev = from->dev; 561 to->mark = from->mark; 562 563 skb_copy_hash(to, from); 564 565 #ifdef CONFIG_NET_SCHED 566 to->tc_index = from->tc_index; 567 #endif 568 nf_copy(to, from); 569 skb_ext_copy(to, from); 570 #if IS_ENABLED(CONFIG_IP_VS) 571 to->ipvs_property = from->ipvs_property; 572 #endif 573 skb_copy_secmark(to, from); 574 } 575 576 static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, 577 unsigned int mtu, 578 int (*output)(struct net *, struct sock *, struct sk_buff *)) 579 { 580 struct iphdr *iph = ip_hdr(skb); 581 582 if ((iph->frag_off & htons(IP_DF)) == 0) 583 return ip_do_fragment(net, sk, skb, output); 584 585 if (unlikely(!skb->ignore_df || 586 (IPCB(skb)->frag_max_size && 587 IPCB(skb)->frag_max_size > mtu))) { 588 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); 589 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 590 htonl(mtu)); 591 kfree_skb(skb); 592 return -EMSGSIZE; 593 } 594 595 return ip_do_fragment(net, sk, skb, output); 596 } 597 598 void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph, 599 unsigned int hlen, struct ip_fraglist_iter *iter) 600 { 601 unsigned int first_len = skb_pagelen(skb); 602 603 iter->frag = skb_shinfo(skb)->frag_list; 604 skb_frag_list_init(skb); 605 606 iter->offset = 0; 607 iter->iph = iph; 608 iter->hlen = hlen; 609 610 skb->data_len = first_len - skb_headlen(skb); 611 skb->len = first_len; 612 iph->tot_len = htons(first_len); 613 iph->frag_off = htons(IP_MF); 614 ip_send_check(iph); 615 } 616 EXPORT_SYMBOL(ip_fraglist_init); 617 618 void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter) 619 { 620 unsigned int hlen = iter->hlen; 621 struct iphdr *iph = iter->iph; 622 struct sk_buff *frag; 623 624 frag = iter->frag; 625 frag->ip_summed = CHECKSUM_NONE; 626 skb_reset_transport_header(frag); 627 __skb_push(frag, hlen); 628 skb_reset_network_header(frag); 629 memcpy(skb_network_header(frag), iph, hlen); 630 iter->iph = ip_hdr(frag); 631 iph = iter->iph; 632 iph->tot_len = htons(frag->len); 633 ip_copy_metadata(frag, skb); 634 iter->offset += skb->len - hlen; 635 iph->frag_off = htons(iter->offset >> 3); 636 if (frag->next) 637 iph->frag_off |= htons(IP_MF); 638 /* Ready, complete checksum */ 639 ip_send_check(iph); 640 } 641 EXPORT_SYMBOL(ip_fraglist_prepare); 642 643 void ip_frag_init(struct sk_buff *skb, unsigned int hlen, 644 unsigned int ll_rs, unsigned int mtu, bool DF, 645 struct ip_frag_state *state) 646 { 647 struct iphdr *iph = ip_hdr(skb); 648 649 state->DF = DF; 650 state->hlen = hlen; 651 state->ll_rs = ll_rs; 652 state->mtu = mtu; 653 654 state->left = skb->len - hlen; /* Space per frame */ 655 state->ptr = hlen; /* Where to start from */ 656 657 state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3; 658 state->not_last_frag = iph->frag_off & htons(IP_MF); 659 } 660 EXPORT_SYMBOL(ip_frag_init); 661 662 static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to, 663 bool first_frag) 664 { 665 /* Copy the flags to each fragment. */ 666 IPCB(to)->flags = IPCB(from)->flags; 667 668 /* ANK: dirty, but effective trick. Upgrade options only if 669 * the segment to be fragmented was THE FIRST (otherwise, 670 * options are already fixed) and make it ONCE 671 * on the initial skb, so that all the following fragments 672 * will inherit fixed options. 673 */ 674 if (first_frag) 675 ip_options_fragment(from); 676 } 677 678 struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state) 679 { 680 unsigned int len = state->left; 681 struct sk_buff *skb2; 682 struct iphdr *iph; 683 684 /* IF: it doesn't fit, use 'mtu' - the data space left */ 685 if (len > state->mtu) 686 len = state->mtu; 687 /* IF: we are not sending up to and including the packet end 688 then align the next start on an eight byte boundary */ 689 if (len < state->left) { 690 len &= ~7; 691 } 692 693 /* Allocate buffer */ 694 skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC); 695 if (!skb2) 696 return ERR_PTR(-ENOMEM); 697 698 /* 699 * Set up data on packet 700 */ 701 702 ip_copy_metadata(skb2, skb); 703 skb_reserve(skb2, state->ll_rs); 704 skb_put(skb2, len + state->hlen); 705 skb_reset_network_header(skb2); 706 skb2->transport_header = skb2->network_header + state->hlen; 707 708 /* 709 * Charge the memory for the fragment to any owner 710 * it might possess 711 */ 712 713 if (skb->sk) 714 skb_set_owner_w(skb2, skb->sk); 715 716 /* 717 * Copy the packet header into the new buffer. 718 */ 719 720 skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen); 721 722 /* 723 * Copy a block of the IP datagram. 724 */ 725 if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len)) 726 BUG(); 727 state->left -= len; 728 729 /* 730 * Fill in the new header fields. 731 */ 732 iph = ip_hdr(skb2); 733 iph->frag_off = htons((state->offset >> 3)); 734 if (state->DF) 735 iph->frag_off |= htons(IP_DF); 736 737 /* 738 * Added AC : If we are fragmenting a fragment that's not the 739 * last fragment then keep MF on each bit 740 */ 741 if (state->left > 0 || state->not_last_frag) 742 iph->frag_off |= htons(IP_MF); 743 state->ptr += len; 744 state->offset += len; 745 746 iph->tot_len = htons(len + state->hlen); 747 748 ip_send_check(iph); 749 750 return skb2; 751 } 752 EXPORT_SYMBOL(ip_frag_next); 753 754 /* 755 * This IP datagram is too large to be sent in one piece. Break it up into 756 * smaller pieces (each of size equal to IP header plus 757 * a block of the data of the original IP data part) that will yet fit in a 758 * single device frame, and queue such a frame for sending. 759 */ 760 761 int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, 762 int (*output)(struct net *, struct sock *, struct sk_buff *)) 763 { 764 struct iphdr *iph; 765 struct sk_buff *skb2; 766 u8 tstamp_type = skb->tstamp_type; 767 struct rtable *rt = skb_rtable(skb); 768 unsigned int mtu, hlen, ll_rs; 769 struct ip_fraglist_iter iter; 770 ktime_t tstamp = skb->tstamp; 771 struct ip_frag_state state; 772 int err = 0; 773 774 /* for offloaded checksums cleanup checksum before fragmentation */ 775 if (skb->ip_summed == CHECKSUM_PARTIAL && 776 (err = skb_checksum_help(skb))) 777 goto fail; 778 779 /* 780 * Point into the IP datagram header. 781 */ 782 783 iph = ip_hdr(skb); 784 785 mtu = ip_skb_dst_mtu(sk, skb); 786 if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu) 787 mtu = IPCB(skb)->frag_max_size; 788 789 /* 790 * Setup starting values. 791 */ 792 793 hlen = iph->ihl * 4; 794 mtu = mtu - hlen; /* Size of data space */ 795 IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE; 796 ll_rs = LL_RESERVED_SPACE(rt->dst.dev); 797 798 /* When frag_list is given, use it. First, check its validity: 799 * some transformers could create wrong frag_list or break existing 800 * one, it is not prohibited. In this case fall back to copying. 801 * 802 * LATER: this step can be merged to real generation of fragments, 803 * we can switch to copy when see the first bad fragment. 804 */ 805 if (skb_has_frag_list(skb)) { 806 struct sk_buff *frag, *frag2; 807 unsigned int first_len = skb_pagelen(skb); 808 809 if (first_len - hlen > mtu || 810 ((first_len - hlen) & 7) || 811 ip_is_fragment(iph) || 812 skb_cloned(skb) || 813 skb_headroom(skb) < ll_rs) 814 goto slow_path; 815 816 skb_walk_frags(skb, frag) { 817 /* Correct geometry. */ 818 if (frag->len > mtu || 819 ((frag->len & 7) && frag->next) || 820 skb_headroom(frag) < hlen + ll_rs) 821 goto slow_path_clean; 822 823 /* Partially cloned skb? */ 824 if (skb_shared(frag)) 825 goto slow_path_clean; 826 827 BUG_ON(frag->sk); 828 if (skb->sk) { 829 frag->sk = skb->sk; 830 frag->destructor = sock_wfree; 831 } 832 skb->truesize -= frag->truesize; 833 } 834 835 /* Everything is OK. Generate! */ 836 ip_fraglist_init(skb, iph, hlen, &iter); 837 838 for (;;) { 839 /* Prepare header of the next frame, 840 * before previous one went down. */ 841 if (iter.frag) { 842 bool first_frag = (iter.offset == 0); 843 844 IPCB(iter.frag)->flags = IPCB(skb)->flags; 845 ip_fraglist_prepare(skb, &iter); 846 if (first_frag && IPCB(skb)->opt.optlen) { 847 /* ipcb->opt is not populated for frags 848 * coming from __ip_make_skb(), 849 * ip_options_fragment() needs optlen 850 */ 851 IPCB(iter.frag)->opt.optlen = 852 IPCB(skb)->opt.optlen; 853 ip_options_fragment(iter.frag); 854 ip_send_check(iter.iph); 855 } 856 } 857 858 skb_set_delivery_time(skb, tstamp, tstamp_type); 859 err = output(net, sk, skb); 860 861 if (!err) 862 IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES); 863 if (err || !iter.frag) 864 break; 865 866 skb = ip_fraglist_next(&iter); 867 } 868 869 if (err == 0) { 870 IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS); 871 return 0; 872 } 873 874 kfree_skb_list(iter.frag); 875 876 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); 877 return err; 878 879 slow_path_clean: 880 skb_walk_frags(skb, frag2) { 881 if (frag2 == frag) 882 break; 883 frag2->sk = NULL; 884 frag2->destructor = NULL; 885 skb->truesize += frag2->truesize; 886 } 887 } 888 889 slow_path: 890 /* 891 * Fragment the datagram. 892 */ 893 894 ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU, 895 &state); 896 897 /* 898 * Keep copying data until we run out. 899 */ 900 901 while (state.left > 0) { 902 bool first_frag = (state.offset == 0); 903 904 skb2 = ip_frag_next(skb, &state); 905 if (IS_ERR(skb2)) { 906 err = PTR_ERR(skb2); 907 goto fail; 908 } 909 ip_frag_ipcb(skb, skb2, first_frag); 910 911 /* 912 * Put this fragment into the sending queue. 913 */ 914 skb_set_delivery_time(skb2, tstamp, tstamp_type); 915 err = output(net, sk, skb2); 916 if (err) 917 goto fail; 918 919 IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES); 920 } 921 consume_skb(skb); 922 IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS); 923 return err; 924 925 fail: 926 kfree_skb(skb); 927 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); 928 return err; 929 } 930 EXPORT_SYMBOL(ip_do_fragment); 931 932 int 933 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) 934 { 935 struct msghdr *msg = from; 936 937 if (skb->ip_summed == CHECKSUM_PARTIAL) { 938 if (!copy_from_iter_full(to, len, &msg->msg_iter)) 939 return -EFAULT; 940 } else { 941 __wsum csum = 0; 942 if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter)) 943 return -EFAULT; 944 skb->csum = csum_block_add(skb->csum, csum, odd); 945 } 946 return 0; 947 } 948 EXPORT_SYMBOL(ip_generic_getfrag); 949 950 static int __ip_append_data(struct sock *sk, 951 struct flowi4 *fl4, 952 struct sk_buff_head *queue, 953 struct inet_cork *cork, 954 struct page_frag *pfrag, 955 int getfrag(void *from, char *to, int offset, 956 int len, int odd, struct sk_buff *skb), 957 void *from, int length, int transhdrlen, 958 unsigned int flags) 959 { 960 struct inet_sock *inet = inet_sk(sk); 961 struct ubuf_info *uarg = NULL; 962 struct sk_buff *skb; 963 struct ip_options *opt = cork->opt; 964 int hh_len; 965 int exthdrlen; 966 int mtu; 967 int copy; 968 int err; 969 int offset = 0; 970 bool zc = false; 971 unsigned int maxfraglen, fragheaderlen, maxnonfragsize; 972 int csummode = CHECKSUM_NONE; 973 struct rtable *rt = dst_rtable(cork->dst); 974 bool paged, hold_tskey = false, extra_uref = false; 975 unsigned int wmem_alloc_delta = 0; 976 u32 tskey = 0; 977 978 skb = skb_peek_tail(queue); 979 980 exthdrlen = !skb ? rt->dst.header_len : 0; 981 mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize; 982 paged = !!cork->gso_size; 983 984 hh_len = LL_RESERVED_SPACE(rt->dst.dev); 985 986 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); 987 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; 988 maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu; 989 990 if (cork->length + length > maxnonfragsize - fragheaderlen) { 991 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, 992 mtu - (opt ? opt->optlen : 0)); 993 return -EMSGSIZE; 994 } 995 996 /* 997 * transhdrlen > 0 means that this is the first fragment and we wish 998 * it won't be fragmented in the future. 999 */ 1000 if (transhdrlen && 1001 length + fragheaderlen <= mtu && 1002 rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) && 1003 (!(flags & MSG_MORE) || cork->gso_size) && 1004 (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM))) 1005 csummode = CHECKSUM_PARTIAL; 1006 1007 if ((flags & MSG_ZEROCOPY) && length) { 1008 struct msghdr *msg = from; 1009 1010 if (getfrag == ip_generic_getfrag && msg->msg_ubuf) { 1011 if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb)) 1012 return -EINVAL; 1013 1014 /* Leave uarg NULL if can't zerocopy, callers should 1015 * be able to handle it. 1016 */ 1017 if ((rt->dst.dev->features & NETIF_F_SG) && 1018 csummode == CHECKSUM_PARTIAL) { 1019 paged = true; 1020 zc = true; 1021 uarg = msg->msg_ubuf; 1022 } 1023 } else if (sock_flag(sk, SOCK_ZEROCOPY)) { 1024 uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb), 1025 false); 1026 if (!uarg) 1027 return -ENOBUFS; 1028 extra_uref = !skb_zcopy(skb); /* only ref on new uarg */ 1029 if (rt->dst.dev->features & NETIF_F_SG && 1030 csummode == CHECKSUM_PARTIAL) { 1031 paged = true; 1032 zc = true; 1033 } else { 1034 uarg_to_msgzc(uarg)->zerocopy = 0; 1035 skb_zcopy_set(skb, uarg, &extra_uref); 1036 } 1037 } 1038 } else if ((flags & MSG_SPLICE_PAGES) && length) { 1039 if (inet_test_bit(HDRINCL, sk)) 1040 return -EPERM; 1041 if (rt->dst.dev->features & NETIF_F_SG && 1042 getfrag == ip_generic_getfrag) 1043 /* We need an empty buffer to attach stuff to */ 1044 paged = true; 1045 else 1046 flags &= ~MSG_SPLICE_PAGES; 1047 } 1048 1049 cork->length += length; 1050 1051 if (cork->tx_flags & SKBTX_ANY_TSTAMP && 1052 READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { 1053 if (cork->flags & IPCORK_TS_OPT_ID) { 1054 tskey = cork->ts_opt_id; 1055 } else { 1056 tskey = atomic_inc_return(&sk->sk_tskey) - 1; 1057 hold_tskey = true; 1058 } 1059 } 1060 1061 /* So, what's going on in the loop below? 1062 * 1063 * We use calculated fragment length to generate chained skb, 1064 * each of segments is IP fragment ready for sending to network after 1065 * adding appropriate IP header. 1066 */ 1067 1068 if (!skb) 1069 goto alloc_new_skb; 1070 1071 while (length > 0) { 1072 /* Check if the remaining data fits into current packet. */ 1073 copy = mtu - skb->len; 1074 if (copy < length) 1075 copy = maxfraglen - skb->len; 1076 if (copy <= 0) { 1077 char *data; 1078 unsigned int datalen; 1079 unsigned int fraglen; 1080 unsigned int fraggap; 1081 unsigned int alloclen, alloc_extra; 1082 unsigned int pagedlen; 1083 struct sk_buff *skb_prev; 1084 alloc_new_skb: 1085 skb_prev = skb; 1086 if (skb_prev) 1087 fraggap = skb_prev->len - maxfraglen; 1088 else 1089 fraggap = 0; 1090 1091 /* 1092 * If remaining data exceeds the mtu, 1093 * we know we need more fragment(s). 1094 */ 1095 datalen = length + fraggap; 1096 if (datalen > mtu - fragheaderlen) 1097 datalen = maxfraglen - fragheaderlen; 1098 fraglen = datalen + fragheaderlen; 1099 pagedlen = 0; 1100 1101 alloc_extra = hh_len + 15; 1102 alloc_extra += exthdrlen; 1103 1104 /* The last fragment gets additional space at tail. 1105 * Note, with MSG_MORE we overallocate on fragments, 1106 * because we have no idea what fragment will be 1107 * the last. 1108 */ 1109 if (datalen == length + fraggap) 1110 alloc_extra += rt->dst.trailer_len; 1111 1112 if ((flags & MSG_MORE) && 1113 !(rt->dst.dev->features&NETIF_F_SG)) 1114 alloclen = mtu; 1115 else if (!paged && 1116 (fraglen + alloc_extra < SKB_MAX_ALLOC || 1117 !(rt->dst.dev->features & NETIF_F_SG))) 1118 alloclen = fraglen; 1119 else { 1120 alloclen = fragheaderlen + transhdrlen; 1121 pagedlen = datalen - transhdrlen; 1122 } 1123 1124 alloclen += alloc_extra; 1125 1126 if (transhdrlen) { 1127 skb = sock_alloc_send_skb(sk, alloclen, 1128 (flags & MSG_DONTWAIT), &err); 1129 } else { 1130 skb = NULL; 1131 if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <= 1132 2 * sk->sk_sndbuf) 1133 skb = alloc_skb(alloclen, 1134 sk->sk_allocation); 1135 if (unlikely(!skb)) 1136 err = -ENOBUFS; 1137 } 1138 if (!skb) 1139 goto error; 1140 1141 /* 1142 * Fill in the control structures 1143 */ 1144 skb->ip_summed = csummode; 1145 skb->csum = 0; 1146 skb_reserve(skb, hh_len); 1147 1148 /* 1149 * Find where to start putting bytes. 1150 */ 1151 data = skb_put(skb, fraglen + exthdrlen - pagedlen); 1152 skb_set_network_header(skb, exthdrlen); 1153 skb->transport_header = (skb->network_header + 1154 fragheaderlen); 1155 data += fragheaderlen + exthdrlen; 1156 1157 if (fraggap) { 1158 skb->csum = skb_copy_and_csum_bits( 1159 skb_prev, maxfraglen, 1160 data + transhdrlen, fraggap); 1161 skb_prev->csum = csum_sub(skb_prev->csum, 1162 skb->csum); 1163 data += fraggap; 1164 pskb_trim_unique(skb_prev, maxfraglen); 1165 } 1166 1167 copy = datalen - transhdrlen - fraggap - pagedlen; 1168 /* [!] NOTE: copy will be negative if pagedlen>0 1169 * because then the equation reduces to -fraggap. 1170 */ 1171 if (copy > 0 && 1172 INDIRECT_CALL_1(getfrag, ip_generic_getfrag, 1173 from, data + transhdrlen, offset, 1174 copy, fraggap, skb) < 0) { 1175 err = -EFAULT; 1176 kfree_skb(skb); 1177 goto error; 1178 } else if (flags & MSG_SPLICE_PAGES) { 1179 copy = 0; 1180 } 1181 1182 offset += copy; 1183 length -= copy + transhdrlen; 1184 transhdrlen = 0; 1185 exthdrlen = 0; 1186 csummode = CHECKSUM_NONE; 1187 1188 /* only the initial fragment is time stamped */ 1189 skb_shinfo(skb)->tx_flags = cork->tx_flags; 1190 cork->tx_flags = 0; 1191 skb_shinfo(skb)->tskey = tskey; 1192 tskey = 0; 1193 skb_zcopy_set(skb, uarg, &extra_uref); 1194 1195 if ((flags & MSG_CONFIRM) && !skb_prev) 1196 skb_set_dst_pending_confirm(skb, 1); 1197 1198 /* 1199 * Put the packet on the pending queue. 1200 */ 1201 if (!skb->destructor) { 1202 skb->destructor = sock_wfree; 1203 skb->sk = sk; 1204 wmem_alloc_delta += skb->truesize; 1205 } 1206 __skb_queue_tail(queue, skb); 1207 continue; 1208 } 1209 1210 if (copy > length) 1211 copy = length; 1212 1213 if (!(rt->dst.dev->features&NETIF_F_SG) && 1214 skb_tailroom(skb) >= copy) { 1215 unsigned int off; 1216 1217 off = skb->len; 1218 if (INDIRECT_CALL_1(getfrag, ip_generic_getfrag, 1219 from, skb_put(skb, copy), 1220 offset, copy, off, skb) < 0) { 1221 __skb_trim(skb, off); 1222 err = -EFAULT; 1223 goto error; 1224 } 1225 } else if (flags & MSG_SPLICE_PAGES) { 1226 struct msghdr *msg = from; 1227 1228 err = -EIO; 1229 if (WARN_ON_ONCE(copy > msg->msg_iter.count)) 1230 goto error; 1231 1232 err = skb_splice_from_iter(skb, &msg->msg_iter, copy); 1233 if (err < 0) 1234 goto error; 1235 copy = err; 1236 wmem_alloc_delta += copy; 1237 } else if (!zc) { 1238 int i = skb_shinfo(skb)->nr_frags; 1239 1240 err = -ENOMEM; 1241 if (!sk_page_frag_refill(sk, pfrag)) 1242 goto error; 1243 1244 skb_zcopy_downgrade_managed(skb); 1245 if (!skb_can_coalesce(skb, i, pfrag->page, 1246 pfrag->offset)) { 1247 err = -EMSGSIZE; 1248 if (i == MAX_SKB_FRAGS) 1249 goto error; 1250 1251 __skb_fill_page_desc(skb, i, pfrag->page, 1252 pfrag->offset, 0); 1253 skb_shinfo(skb)->nr_frags = ++i; 1254 get_page(pfrag->page); 1255 } 1256 copy = min_t(int, copy, pfrag->size - pfrag->offset); 1257 if (INDIRECT_CALL_1(getfrag, ip_generic_getfrag, 1258 from, 1259 page_address(pfrag->page) + pfrag->offset, 1260 offset, copy, skb->len, skb) < 0) 1261 goto error_efault; 1262 1263 pfrag->offset += copy; 1264 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1265 skb_len_add(skb, copy); 1266 wmem_alloc_delta += copy; 1267 } else { 1268 err = skb_zerocopy_iter_dgram(skb, from, copy); 1269 if (err < 0) 1270 goto error; 1271 } 1272 offset += copy; 1273 length -= copy; 1274 } 1275 1276 if (wmem_alloc_delta) 1277 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); 1278 return 0; 1279 1280 error_efault: 1281 err = -EFAULT; 1282 error: 1283 net_zcopy_put_abort(uarg, extra_uref); 1284 cork->length -= length; 1285 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); 1286 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); 1287 if (hold_tskey) 1288 atomic_dec(&sk->sk_tskey); 1289 return err; 1290 } 1291 1292 static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, 1293 struct ipcm_cookie *ipc, struct rtable **rtp) 1294 { 1295 struct ip_options_rcu *opt; 1296 struct rtable *rt; 1297 1298 rt = *rtp; 1299 if (unlikely(!rt)) 1300 return -EFAULT; 1301 1302 cork->fragsize = ip_sk_use_pmtu(sk) ? 1303 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu); 1304 1305 if (!inetdev_valid_mtu(cork->fragsize)) 1306 return -ENETUNREACH; 1307 1308 /* 1309 * setup for corking. 1310 */ 1311 opt = ipc->opt; 1312 if (opt) { 1313 if (!cork->opt) { 1314 cork->opt = kmalloc(sizeof(struct ip_options) + 40, 1315 sk->sk_allocation); 1316 if (unlikely(!cork->opt)) 1317 return -ENOBUFS; 1318 } 1319 memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen); 1320 cork->flags |= IPCORK_OPT; 1321 cork->addr = ipc->addr; 1322 } 1323 1324 cork->gso_size = ipc->gso_size; 1325 1326 cork->dst = &rt->dst; 1327 /* We stole this route, caller should not release it. */ 1328 *rtp = NULL; 1329 1330 cork->length = 0; 1331 cork->ttl = ipc->ttl; 1332 cork->tos = ipc->tos; 1333 cork->mark = ipc->sockc.mark; 1334 cork->priority = ipc->sockc.priority; 1335 cork->transmit_time = ipc->sockc.transmit_time; 1336 cork->tx_flags = 0; 1337 sock_tx_timestamp(sk, &ipc->sockc, &cork->tx_flags); 1338 if (ipc->sockc.tsflags & SOCKCM_FLAG_TS_OPT_ID) { 1339 cork->flags |= IPCORK_TS_OPT_ID; 1340 cork->ts_opt_id = ipc->sockc.ts_opt_id; 1341 } 1342 1343 return 0; 1344 } 1345 1346 /* 1347 * ip_append_data() can make one large IP datagram from many pieces of 1348 * data. Each piece will be held on the socket until 1349 * ip_push_pending_frames() is called. Each piece can be a page or 1350 * non-page data. 1351 * 1352 * Not only UDP, other transport protocols - e.g. raw sockets - can use 1353 * this interface potentially. 1354 * 1355 * LATER: length must be adjusted by pad at tail, when it is required. 1356 */ 1357 int ip_append_data(struct sock *sk, struct flowi4 *fl4, 1358 int getfrag(void *from, char *to, int offset, int len, 1359 int odd, struct sk_buff *skb), 1360 void *from, int length, int transhdrlen, 1361 struct ipcm_cookie *ipc, struct rtable **rtp, 1362 unsigned int flags) 1363 { 1364 struct inet_sock *inet = inet_sk(sk); 1365 int err; 1366 1367 if (flags&MSG_PROBE) 1368 return 0; 1369 1370 if (skb_queue_empty(&sk->sk_write_queue)) { 1371 err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp); 1372 if (err) 1373 return err; 1374 } else { 1375 transhdrlen = 0; 1376 } 1377 1378 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, 1379 sk_page_frag(sk), getfrag, 1380 from, length, transhdrlen, flags); 1381 } 1382 1383 static void ip_cork_release(struct inet_cork *cork) 1384 { 1385 cork->flags &= ~IPCORK_OPT; 1386 kfree(cork->opt); 1387 cork->opt = NULL; 1388 dst_release(cork->dst); 1389 cork->dst = NULL; 1390 } 1391 1392 /* 1393 * Combined all pending IP fragments on the socket as one IP datagram 1394 * and push them out. 1395 */ 1396 struct sk_buff *__ip_make_skb(struct sock *sk, 1397 struct flowi4 *fl4, 1398 struct sk_buff_head *queue, 1399 struct inet_cork *cork) 1400 { 1401 struct sk_buff *skb, *tmp_skb; 1402 struct sk_buff **tail_skb; 1403 struct inet_sock *inet = inet_sk(sk); 1404 struct net *net = sock_net(sk); 1405 struct ip_options *opt = NULL; 1406 struct rtable *rt = dst_rtable(cork->dst); 1407 struct iphdr *iph; 1408 u8 pmtudisc, ttl; 1409 __be16 df = 0; 1410 1411 skb = __skb_dequeue(queue); 1412 if (!skb) 1413 goto out; 1414 tail_skb = &(skb_shinfo(skb)->frag_list); 1415 1416 /* move skb->data to ip header from ext header */ 1417 if (skb->data < skb_network_header(skb)) 1418 __skb_pull(skb, skb_network_offset(skb)); 1419 while ((tmp_skb = __skb_dequeue(queue)) != NULL) { 1420 __skb_pull(tmp_skb, skb_network_header_len(skb)); 1421 *tail_skb = tmp_skb; 1422 tail_skb = &(tmp_skb->next); 1423 skb->len += tmp_skb->len; 1424 skb->data_len += tmp_skb->len; 1425 skb->truesize += tmp_skb->truesize; 1426 tmp_skb->destructor = NULL; 1427 tmp_skb->sk = NULL; 1428 } 1429 1430 /* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow 1431 * to fragment the frame generated here. No matter, what transforms 1432 * how transforms change size of the packet, it will come out. 1433 */ 1434 skb->ignore_df = ip_sk_ignore_df(sk); 1435 1436 /* DF bit is set when we want to see DF on outgoing frames. 1437 * If ignore_df is set too, we still allow to fragment this frame 1438 * locally. */ 1439 pmtudisc = READ_ONCE(inet->pmtudisc); 1440 if (pmtudisc == IP_PMTUDISC_DO || 1441 pmtudisc == IP_PMTUDISC_PROBE || 1442 (skb->len <= dst_mtu(&rt->dst) && 1443 ip_dont_fragment(sk, &rt->dst))) 1444 df = htons(IP_DF); 1445 1446 if (cork->flags & IPCORK_OPT) 1447 opt = cork->opt; 1448 1449 if (cork->ttl != 0) 1450 ttl = cork->ttl; 1451 else if (rt->rt_type == RTN_MULTICAST) 1452 ttl = READ_ONCE(inet->mc_ttl); 1453 else 1454 ttl = ip_select_ttl(inet, &rt->dst); 1455 1456 iph = ip_hdr(skb); 1457 iph->version = 4; 1458 iph->ihl = 5; 1459 iph->tos = (cork->tos != -1) ? cork->tos : READ_ONCE(inet->tos); 1460 iph->frag_off = df; 1461 iph->ttl = ttl; 1462 iph->protocol = sk->sk_protocol; 1463 ip_copy_addrs(iph, fl4); 1464 ip_select_ident(net, skb, sk); 1465 1466 if (opt) { 1467 iph->ihl += opt->optlen >> 2; 1468 ip_options_build(skb, opt, cork->addr, rt); 1469 } 1470 1471 skb->priority = cork->priority; 1472 skb->mark = cork->mark; 1473 if (sk_is_tcp(sk)) 1474 skb_set_delivery_time(skb, cork->transmit_time, SKB_CLOCK_MONOTONIC); 1475 else 1476 skb_set_delivery_type_by_clockid(skb, cork->transmit_time, sk->sk_clockid); 1477 /* 1478 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec 1479 * on dst refcount 1480 */ 1481 cork->dst = NULL; 1482 skb_dst_set(skb, &rt->dst); 1483 1484 if (iph->protocol == IPPROTO_ICMP) { 1485 u8 icmp_type; 1486 1487 /* For such sockets, transhdrlen is zero when do ip_append_data(), 1488 * so icmphdr does not in skb linear region and can not get icmp_type 1489 * by icmp_hdr(skb)->type. 1490 */ 1491 if (sk->sk_type == SOCK_RAW && 1492 !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH)) 1493 icmp_type = fl4->fl4_icmp_type; 1494 else 1495 icmp_type = icmp_hdr(skb)->type; 1496 icmp_out_count(net, icmp_type); 1497 } 1498 1499 ip_cork_release(cork); 1500 out: 1501 return skb; 1502 } 1503 1504 int ip_send_skb(struct net *net, struct sk_buff *skb) 1505 { 1506 int err; 1507 1508 err = ip_local_out(net, skb->sk, skb); 1509 if (err) { 1510 if (err > 0) 1511 err = net_xmit_errno(err); 1512 if (err) 1513 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); 1514 } 1515 1516 return err; 1517 } 1518 1519 int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4) 1520 { 1521 struct sk_buff *skb; 1522 1523 skb = ip_finish_skb(sk, fl4); 1524 if (!skb) 1525 return 0; 1526 1527 /* Netfilter gets whole the not fragmented skb. */ 1528 return ip_send_skb(sock_net(sk), skb); 1529 } 1530 1531 /* 1532 * Throw away all pending data on the socket. 1533 */ 1534 static void __ip_flush_pending_frames(struct sock *sk, 1535 struct sk_buff_head *queue, 1536 struct inet_cork *cork) 1537 { 1538 struct sk_buff *skb; 1539 1540 while ((skb = __skb_dequeue_tail(queue)) != NULL) 1541 kfree_skb(skb); 1542 1543 ip_cork_release(cork); 1544 } 1545 1546 void ip_flush_pending_frames(struct sock *sk) 1547 { 1548 __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base); 1549 } 1550 1551 struct sk_buff *ip_make_skb(struct sock *sk, 1552 struct flowi4 *fl4, 1553 int getfrag(void *from, char *to, int offset, 1554 int len, int odd, struct sk_buff *skb), 1555 void *from, int length, int transhdrlen, 1556 struct ipcm_cookie *ipc, struct rtable **rtp, 1557 struct inet_cork *cork, unsigned int flags) 1558 { 1559 struct sk_buff_head queue; 1560 int err; 1561 1562 if (flags & MSG_PROBE) 1563 return NULL; 1564 1565 __skb_queue_head_init(&queue); 1566 1567 cork->flags = 0; 1568 cork->addr = 0; 1569 cork->opt = NULL; 1570 err = ip_setup_cork(sk, cork, ipc, rtp); 1571 if (err) 1572 return ERR_PTR(err); 1573 1574 err = __ip_append_data(sk, fl4, &queue, cork, 1575 ¤t->task_frag, getfrag, 1576 from, length, transhdrlen, flags); 1577 if (err) { 1578 __ip_flush_pending_frames(sk, &queue, cork); 1579 return ERR_PTR(err); 1580 } 1581 1582 return __ip_make_skb(sk, fl4, &queue, cork); 1583 } 1584 1585 /* 1586 * Fetch data from kernel space and fill in checksum if needed. 1587 */ 1588 static int ip_reply_glue_bits(void *dptr, char *to, int offset, 1589 int len, int odd, struct sk_buff *skb) 1590 { 1591 __wsum csum; 1592 1593 csum = csum_partial_copy_nocheck(dptr+offset, to, len); 1594 skb->csum = csum_block_add(skb->csum, csum, odd); 1595 return 0; 1596 } 1597 1598 /* 1599 * Generic function to send a packet as reply to another packet. 1600 * Used to send some TCP resets/acks so far. 1601 */ 1602 void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk, 1603 struct sk_buff *skb, 1604 const struct ip_options *sopt, 1605 __be32 daddr, __be32 saddr, 1606 const struct ip_reply_arg *arg, 1607 unsigned int len, u64 transmit_time, u32 txhash) 1608 { 1609 struct ip_options_data replyopts; 1610 struct ipcm_cookie ipc; 1611 struct flowi4 fl4; 1612 struct rtable *rt = skb_rtable(skb); 1613 struct net *net = sock_net(sk); 1614 struct sk_buff *nskb; 1615 int err; 1616 int oif; 1617 1618 if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt)) 1619 return; 1620 1621 ipcm_init(&ipc); 1622 ipc.addr = daddr; 1623 ipc.sockc.transmit_time = transmit_time; 1624 1625 if (replyopts.opt.opt.optlen) { 1626 ipc.opt = &replyopts.opt; 1627 1628 if (replyopts.opt.opt.srr) 1629 daddr = replyopts.opt.opt.faddr; 1630 } 1631 1632 oif = arg->bound_dev_if; 1633 if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) 1634 oif = skb->skb_iif; 1635 1636 flowi4_init_output(&fl4, oif, 1637 IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark, 1638 arg->tos & INET_DSCP_MASK, 1639 RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol, 1640 ip_reply_arg_flowi_flags(arg), 1641 daddr, saddr, 1642 tcp_hdr(skb)->source, tcp_hdr(skb)->dest, 1643 arg->uid); 1644 security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); 1645 rt = ip_route_output_flow(net, &fl4, sk); 1646 if (IS_ERR(rt)) 1647 return; 1648 1649 inet_sk(sk)->tos = arg->tos; 1650 1651 sk->sk_protocol = ip_hdr(skb)->protocol; 1652 sk->sk_bound_dev_if = arg->bound_dev_if; 1653 sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); 1654 ipc.sockc.mark = fl4.flowi4_mark; 1655 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, 1656 len, 0, &ipc, &rt, MSG_DONTWAIT); 1657 if (unlikely(err)) { 1658 ip_flush_pending_frames(sk); 1659 goto out; 1660 } 1661 1662 nskb = skb_peek(&sk->sk_write_queue); 1663 if (nskb) { 1664 if (arg->csumoffset >= 0) 1665 *((__sum16 *)skb_transport_header(nskb) + 1666 arg->csumoffset) = csum_fold(csum_add(nskb->csum, 1667 arg->csum)); 1668 nskb->ip_summed = CHECKSUM_NONE; 1669 if (orig_sk) { 1670 skb_set_owner_edemux(nskb, (struct sock *)orig_sk); 1671 psp_reply_set_decrypted(nskb); 1672 } 1673 if (transmit_time) 1674 nskb->tstamp_type = SKB_CLOCK_MONOTONIC; 1675 if (txhash) 1676 skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4); 1677 ip_push_pending_frames(sk, &fl4); 1678 } 1679 out: 1680 ip_rt_put(rt); 1681 } 1682 1683 void __init ip_init(void) 1684 { 1685 ip_rt_init(); 1686 inet_initpeers(); 1687 1688 #if defined(CONFIG_IP_MULTICAST) 1689 igmp_mc_init(); 1690 #endif 1691 } 1692