// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/inetpeer.h>
#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
#include <net/inet_dscp.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

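/* Finalise the IPv4 header (total length, checksum), account the request,
 * give an L3 master device a chance to take over the skb, and then run the
 * netfilter LOCAL_OUT hook with dst_output() as the continuation.
 */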
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	IP_INC_STATS(net, IPSTATS_MIB_OUTREQUESTS);

	iph_set_totlen(iph, skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}

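/* Run the LOCAL_OUT hook via __ip_local_out() and, if the packet was not
 * stolen or dropped there (return value 1), hand it to dst_output().
 */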
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

static inline int ip_select_ttl(const struct inet_sock *inet,
				const struct dst_entry *dst)
{
	int ttl = READ_ONCE(inet->uc_ttl);

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
			  u8 tos)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = tos;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	/* Do not bother generating IPID for small packets (eg SYNACK) */
	if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		/* TCP packets here are SYNACK with fat IPv4/TCP options.
		 * Avoid using the hashed IP ident generator.
		 */
		if (sk->sk_protocol == IPPROTO_TCP)
			iph->id = (__force __be16)get_random_u16();
		else
			__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen>>2;
		ip_options_build(skb, &opt->opt, daddr, rt);
	}

	skb->priority = READ_ONCE(sk->sk_priority);
	if (!skb->mark)
		skb->mark = READ_ONCE(sk->sk_mark);

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

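/* Final transmit step: resolve the neighbour entry for the route's next hop
 * (expanding headroom and honouring lightweight-tunnel redirects first) and
 * queue the skb on the output device via neigh_output().
 */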
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = dst_rtable(dst);
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* OUTOCTETS should be counted after fragment */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		skb = skb_expand_head(skb, hh_len);
		if (!skb)
			return -ENOMEM;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res != LWTUNNEL_XMIT_CONTINUE)
			return res;
	}

	rcu_read_lock();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock();
		return res;
	}
	rcu_read_unlock();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
	return PTR_ERR(neigh);
}

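/* GSO packets whose segment size still exceeds the egress MTU are software
 * segmented here and each resulting segment is pushed through ip_fragment().
 */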
static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs, *nskb;
	netdev_features_t features;
	int ret = 0;

	/* common case: seglen is <= mtu
	 */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization interface
	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
	 *    stack.
	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
	}

	return ret;
}

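/* Decide how to finish transmission: re-route packets that gained an xfrm
 * policy after SNAT, segment GSO packets, fragment oversized ones, or send
 * directly through ip_finish_output2().
 */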
static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || IPCB(skb)->frag_max_size)
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

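/* Run the cgroup BPF egress program first; NET_XMIT_CN still transmits the
 * packet but preserves the congestion-notification return value.
 */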
static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
		return ret;
	}
}

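/* Loop a multicast/broadcast copy back to local listeners through the
 * loopback device, again subject to the cgroup BPF egress program.
 */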
static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	struct rtable *new_rt;
	bool do_cn = false;
	int ret, err;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_CN:
		do_cn = true;
		fallthrough;
	case NET_XMIT_SUCCESS:
		break;
	default:
		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
		return ret;
	}

	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
	 * see ipv4_pktinfo_prepare().
	 */
	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
	if (new_rt) {
		new_rt->rt_iif = 0;
		skb_dst_drop(skb);
		skb_dst_set(skb, &new_rt->dst);
	}

	err = dev_loopback_xmit(net, sk, skb);
	return (do_cn && err) ? ret : err;
}

int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		 * that were returned after forwarding; ip_mr_input() will
		 * drop them in any case.
		 * Note that local frames are looped back so that they can
		 * be delivered to local recipients.
		 *
		 * This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

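/* Standard output path for locally generated, routed packets: set the output
 * device and protocol, then run the POST_ROUTING hook unless the packet was
 * already marked as rerouted.
 */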
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
EXPORT_SYMBOL(ip_output);

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));

	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;
}

/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = dst_rtable(__sk_dst_check(sk, 0));
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   tos & INET_DSCP_MASK,
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = READ_ONCE(sk->sk_priority);
	skb->mark = READ_ONCE(sk->sk_mark);

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, READ_ONCE(inet_sk(sk)->tos));
}
EXPORT_SYMBOL(ip_queue_xmit);

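/* Copy the metadata that every fragment must share with the original skb
 * (dst, device, mark, priority, hash, netfilter/extension state, secmark).
 */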
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

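/* Fragment the packet, or reject it with ICMP_FRAG_NEEDED, depending on the
 * DF bit and the per-packet ignore_df/frag_max_size constraints.
 */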
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

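/* Fast-path fragmentation helpers: ip_fraglist_init() turns the head skb into
 * the first fragment and detaches its frag_list; ip_fraglist_prepare() fixes
 * up the IP header of each queued fragment in turn.
 */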
void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);

void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);

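/* Slow-path fragmentation: ip_frag_init() captures the per-datagram state
 * that ip_frag_next() consumes to carve out one fragment at a time.
 */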
void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu, bool DF,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->DF = DF;
	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);

static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and make it ONCE
	 * on the initial skb, so that all the following fragments
	 * will inherit fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}

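/* Allocate and fill the next slow-path fragment described by the
 * ip_frag_state, copying the IP header and the next block of payload
 * from the original skb.
 */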
struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left) {
		len &= ~7;
	}

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */

	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */

	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
	 */
	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
		BUG();
	state->left -= len;

	/*
	 *	Fill in the new header fields.
	 */
	iph = ip_hdr(skb2);
	iph->frag_off = htons((state->offset >> 3));
	if (state->DF)
		iph->frag_off |= htons(IP_DF);

	/*
	 *	Added AC : If we are fragmenting a fragment that's not the
	 *		   last fragment then keep MF on each bit
	 */
	if (state->left > 0 || state->not_last_frag)
		iph->frag_off |= htons(IP_MF);
	state->ptr += len;
	state->offset += len;

	iph->tot_len = htons(len + state->hlen);

	ip_send_check(iph);

	return skb2;
}
EXPORT_SYMBOL(ip_frag_next);

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	struct sk_buff *skb2;
	u8 tstamp_type = skb->tstamp_type;
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu, hlen, ll_rs;
	struct ip_fraglist_iter iter;
	ktime_t tstamp = skb->tstamp;
	struct ip_frag_state state;
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (iter.frag) {
				bool first_frag = (iter.offset == 0);

				IPCB(iter.frag)->flags = IPCB(skb)->flags;
				ip_fraglist_prepare(skb, &iter);
				if (first_frag && IPCB(skb)->opt.optlen) {
					/* ipcb->opt is not populated for frags
					 * coming from __ip_make_skb(),
					 * ip_options_fragment() needs optlen
					 */
					IPCB(iter.frag)->opt.optlen =
						IPCB(skb)->opt.optlen;
					ip_options_fragment(iter.frag);
					ip_send_check(iter.iph);
				}
			}

			skb_set_delivery_time(skb, tstamp, tstamp_type);
			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
		     &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag);

		/*
		 *	Put this fragment into the sending queue.
		 */
		skb_set_delivery_time(skb2, tstamp, tstamp_type);
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

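/* getfrag() callback used by most callers: copy data from the msghdr
 * iterator into the skb, computing the checksum while copying when the
 * device will not do it (i.e. when ip_summed is not CHECKSUM_PARTIAL).
 */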
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

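/* Append data to the socket's output queue, building skbs sized to the path
 * MTU so that the assembled datagram can later be sent (or fragmented)
 * without copying. Handles the MSG_ZEROCOPY and MSG_SPLICE_PAGES fast paths.
 */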
__ip_append_data(struct sock * sk,struct flowi4 * fl4,struct sk_buff_head * queue,struct inet_cork * cork,struct page_frag * pfrag,int getfrag (void * from,char * to,int offset,int len,int odd,struct sk_buff * skb),void * from,int length,int transhdrlen,unsigned int flags)952f5fca608SDavid S. Miller static int __ip_append_data(struct sock *sk,
953f5fca608SDavid S. Miller struct flowi4 *fl4,
954f5fca608SDavid S. Miller struct sk_buff_head *queue,
9551470ddf7SHerbert Xu struct inet_cork *cork,
9565640f768SEric Dumazet struct page_frag *pfrag,
9571470ddf7SHerbert Xu int getfrag(void *from, char *to, int offset,
9581470ddf7SHerbert Xu int len, int odd, struct sk_buff *skb),
9591da177e4SLinus Torvalds void *from, int length, int transhdrlen,
9601da177e4SLinus Torvalds unsigned int flags)
9611da177e4SLinus Torvalds {
9621da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk);
963b5947e5dSWillem de Bruijn struct ubuf_info *uarg = NULL;
9641da177e4SLinus Torvalds struct sk_buff *skb;
96507df5294SHerbert Xu struct ip_options *opt = cork->opt;
9661da177e4SLinus Torvalds int hh_len;
9671da177e4SLinus Torvalds int exthdrlen;
9681da177e4SLinus Torvalds int mtu;
9691da177e4SLinus Torvalds int copy;
9701da177e4SLinus Torvalds int err;
9711da177e4SLinus Torvalds int offset = 0;
9728eb77cc7SPavel Begunkov bool zc = false;
973daba287bSHannes Frederic Sowa unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
9741da177e4SLinus Torvalds int csummode = CHECKSUM_NONE;
97505d6d492SEric Dumazet struct rtable *rt = dst_rtable(cork->dst);
976488b6d91SVadim Fedorenko bool paged, hold_tskey, extra_uref = false;
977694aba69SEric Dumazet unsigned int wmem_alloc_delta = 0;
97809c2d251SWillem de Bruijn u32 tskey = 0;
9791da177e4SLinus Torvalds
98096d7303eSSteffen Klassert skb = skb_peek_tail(queue);
98196d7303eSSteffen Klassert
98296d7303eSSteffen Klassert exthdrlen = !skb ? rt->dst.header_len : 0;
983bec1f6f6SWillem de Bruijn mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
98415e36f5bSWillem de Bruijn paged = !!cork->gso_size;
985bec1f6f6SWillem de Bruijn
986d8d1f30bSChangli Gao hh_len = LL_RESERVED_SPACE(rt->dst.dev);
9871da177e4SLinus Torvalds
9881da177e4SLinus Torvalds fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
9891da177e4SLinus Torvalds maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
990cbc08a33SMiaohe Lin maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;
9911da177e4SLinus Torvalds
992daba287bSHannes Frederic Sowa if (cork->length + length > maxnonfragsize - fragheaderlen) {
993f5fca608SDavid S. Miller ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
99461e7f09dSHannes Frederic Sowa mtu - (opt ? opt->optlen : 0));
9951da177e4SLinus Torvalds return -EMSGSIZE;
9961da177e4SLinus Torvalds }
9971da177e4SLinus Torvalds
9981da177e4SLinus Torvalds /*
9991da177e4SLinus Torvalds * transhdrlen > 0 means that this is the first fragment and we wish
10001da177e4SLinus Torvalds * it won't be fragmented in the future.
10011da177e4SLinus Torvalds */
10021da177e4SLinus Torvalds if (transhdrlen &&
10031da177e4SLinus Torvalds length + fragheaderlen <= mtu &&
1004c8cd0989STom Herbert rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
1005bec1f6f6SWillem de Bruijn (!(flags & MSG_MORE) || cork->gso_size) &&
1006cd027a54SJacek Kalwas (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
100784fa7933SPatrick McHardy csummode = CHECKSUM_PARTIAL;
10081da177e4SLinus Torvalds
1009c445f31bSPavel Begunkov if ((flags & MSG_ZEROCOPY) && length) {
1010c445f31bSPavel Begunkov struct msghdr *msg = from;
1011c445f31bSPavel Begunkov
1012c445f31bSPavel Begunkov if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
1013c445f31bSPavel Begunkov if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
1014c445f31bSPavel Begunkov return -EINVAL;
1015c445f31bSPavel Begunkov
1016c445f31bSPavel Begunkov /* Leave uarg NULL if can't zerocopy, callers should
1017c445f31bSPavel Begunkov * be able to handle it.
1018c445f31bSPavel Begunkov */
1019c445f31bSPavel Begunkov if ((rt->dst.dev->features & NETIF_F_SG) &&
1020c445f31bSPavel Begunkov csummode == CHECKSUM_PARTIAL) {
1021c445f31bSPavel Begunkov paged = true;
1022c445f31bSPavel Begunkov zc = true;
1023c445f31bSPavel Begunkov uarg = msg->msg_ubuf;
1024c445f31bSPavel Begunkov }
1025c445f31bSPavel Begunkov } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
10268c793822SJonathan Lemon uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
1027b5947e5dSWillem de Bruijn if (!uarg)
1028b5947e5dSWillem de Bruijn return -ENOBUFS;
1029522924b5SWillem de Bruijn extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
1030b5947e5dSWillem de Bruijn if (rt->dst.dev->features & NETIF_F_SG &&
1031b5947e5dSWillem de Bruijn csummode == CHECKSUM_PARTIAL) {
1032b5947e5dSWillem de Bruijn paged = true;
10338eb77cc7SPavel Begunkov zc = true;
1034b5947e5dSWillem de Bruijn } else {
1035e7d2b510SPavel Begunkov uarg_to_msgzc(uarg)->zerocopy = 0;
103652900d22SWillem de Bruijn skb_zcopy_set(skb, uarg, &extra_uref);
1037b5947e5dSWillem de Bruijn }
1038b5947e5dSWillem de Bruijn }
10397da0dde6SDavid Howells } else if ((flags & MSG_SPLICE_PAGES) && length) {
1040cafbe182SEric Dumazet if (inet_test_bit(HDRINCL, sk))
10417da0dde6SDavid Howells return -EPERM;
10425a6f6873SDavid Howells if (rt->dst.dev->features & NETIF_F_SG &&
10435a6f6873SDavid Howells getfrag == ip_generic_getfrag)
10447da0dde6SDavid Howells /* We need an empty buffer to attach stuff to */
10457da0dde6SDavid Howells paged = true;
10467da0dde6SDavid Howells else
10477da0dde6SDavid Howells flags &= ~MSG_SPLICE_PAGES;
1048c445f31bSPavel Begunkov }
1049b5947e5dSWillem de Bruijn
10501470ddf7SHerbert Xu cork->length += length;
10511da177e4SLinus Torvalds
1052488b6d91SVadim Fedorenko hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
1053488b6d91SVadim Fedorenko READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
1054488b6d91SVadim Fedorenko if (hold_tskey)
1055488b6d91SVadim Fedorenko tskey = atomic_inc_return(&sk->sk_tskey) - 1;
1056488b6d91SVadim Fedorenko
10571da177e4SLinus Torvalds /* So, what's going on in the loop below?
10581da177e4SLinus Torvalds *
10591da177e4SLinus Torvalds * We use the calculated fragment length to generate a chained skb;
10601da177e4SLinus Torvalds * each of its segments is an IP fragment ready to be sent to the
10611da177e4SLinus Torvalds * network once the appropriate IP header has been added.
10621da177e4SLinus Torvalds */
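	/* 'copy' is how much still fits into the current skb.  When it drops
	 * to zero or below, a new skb is allocated and any overshoot past
	 * maxfraglen ('fraggap') is moved from the tail of the previous
	 * fragment into the new one.
	 */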
10631da177e4SLinus Torvalds
106426cde9f7SHerbert Xu if (!skb)
10651da177e4SLinus Torvalds goto alloc_new_skb;
10661da177e4SLinus Torvalds
10671da177e4SLinus Torvalds while (length > 0) {
10681da177e4SLinus Torvalds /* Check if the remaining data fits into current packet. */
10691da177e4SLinus Torvalds copy = mtu - skb->len;
10701da177e4SLinus Torvalds if (copy < length)
10711da177e4SLinus Torvalds copy = maxfraglen - skb->len;
10721da177e4SLinus Torvalds if (copy <= 0) {
10731da177e4SLinus Torvalds char *data;
10741da177e4SLinus Torvalds unsigned int datalen;
10751da177e4SLinus Torvalds unsigned int fraglen;
10761da177e4SLinus Torvalds unsigned int fraggap;
10776d123b81SJakub Kicinski unsigned int alloclen, alloc_extra;
1078aba36930SWillem de Bruijn unsigned int pagedlen;
10791da177e4SLinus Torvalds struct sk_buff *skb_prev;
10801da177e4SLinus Torvalds alloc_new_skb:
10811da177e4SLinus Torvalds skb_prev = skb;
10821da177e4SLinus Torvalds if (skb_prev)
10831da177e4SLinus Torvalds fraggap = skb_prev->len - maxfraglen;
10841da177e4SLinus Torvalds else
10851da177e4SLinus Torvalds fraggap = 0;
10861da177e4SLinus Torvalds
10871da177e4SLinus Torvalds /*
10881da177e4SLinus Torvalds * If remaining data exceeds the mtu,
10891da177e4SLinus Torvalds * we know we need more fragment(s).
10901da177e4SLinus Torvalds */
10911da177e4SLinus Torvalds datalen = length + fraggap;
10921da177e4SLinus Torvalds if (datalen > mtu - fragheaderlen)
10931da177e4SLinus Torvalds datalen = maxfraglen - fragheaderlen;
10941da177e4SLinus Torvalds fraglen = datalen + fragheaderlen;
1095aba36930SWillem de Bruijn pagedlen = 0;
10961da177e4SLinus Torvalds
10976d123b81SJakub Kicinski alloc_extra = hh_len + 15;
10986d123b81SJakub Kicinski alloc_extra += exthdrlen;
1099353e5c9aSSteffen Klassert
11001da177e4SLinus Torvalds /* The last fragment gets additional space at tail.
11011da177e4SLinus Torvalds * Note, with MSG_MORE we overallocate on fragments,
11021da177e4SLinus Torvalds * because we have no idea which fragment will be
11031da177e4SLinus Torvalds * the last.
11041da177e4SLinus Torvalds */
110533f99dc7SSteffen Klassert if (datalen == length + fraggap)
11066d123b81SJakub Kicinski alloc_extra += rt->dst.trailer_len;
11076d123b81SJakub Kicinski
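			/* Choose the linear allocation size: non-SG devices
			 * with MSG_MORE get a full MTU-sized buffer, frames
			 * small enough for SKB_MAX_ALLOC (or on non-SG
			 * devices) are allocated in full, and paged/zerocopy
			 * paths only allocate room for the headers, leaving
			 * the payload for page fragments (pagedlen).
			 */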
11086d123b81SJakub Kicinski if ((flags & MSG_MORE) &&
11096d123b81SJakub Kicinski !(rt->dst.dev->features&NETIF_F_SG))
11106d123b81SJakub Kicinski alloclen = mtu;
11116d123b81SJakub Kicinski else if (!paged &&
11126d123b81SJakub Kicinski (fraglen + alloc_extra < SKB_MAX_ALLOC ||
11136d123b81SJakub Kicinski !(rt->dst.dev->features & NETIF_F_SG)))
11146d123b81SJakub Kicinski alloclen = fraglen;
111547cf8899SPavel Begunkov else {
11168eb77cc7SPavel Begunkov alloclen = fragheaderlen + transhdrlen;
11178eb77cc7SPavel Begunkov pagedlen = datalen - transhdrlen;
11186d123b81SJakub Kicinski }
11196d123b81SJakub Kicinski
11206d123b81SJakub Kicinski alloclen += alloc_extra;
112133f99dc7SSteffen Klassert
11221da177e4SLinus Torvalds if (transhdrlen) {
11236d123b81SJakub Kicinski skb = sock_alloc_send_skb(sk, alloclen,
11241da177e4SLinus Torvalds (flags & MSG_DONTWAIT), &err);
11251da177e4SLinus Torvalds } else {
11261da177e4SLinus Torvalds skb = NULL;
1127694aba69SEric Dumazet if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
11281da177e4SLinus Torvalds 2 * sk->sk_sndbuf)
11296d123b81SJakub Kicinski skb = alloc_skb(alloclen,
11301da177e4SLinus Torvalds sk->sk_allocation);
113151456b29SIan Morris if (unlikely(!skb))
11321da177e4SLinus Torvalds err = -ENOBUFS;
11331da177e4SLinus Torvalds }
113451456b29SIan Morris if (!skb)
11351da177e4SLinus Torvalds goto error;
11361da177e4SLinus Torvalds
11371da177e4SLinus Torvalds /*
11381da177e4SLinus Torvalds * Fill in the control structures
11391da177e4SLinus Torvalds */
11401da177e4SLinus Torvalds skb->ip_summed = csummode;
11411da177e4SLinus Torvalds skb->csum = 0;
11421da177e4SLinus Torvalds skb_reserve(skb, hh_len);
114311878b40SWillem de Bruijn
11441da177e4SLinus Torvalds /*
11451da177e4SLinus Torvalds * Find where to start putting bytes.
11461da177e4SLinus Torvalds */
114715e36f5bSWillem de Bruijn data = skb_put(skb, fraglen + exthdrlen - pagedlen);
1148c14d2450SArnaldo Carvalho de Melo skb_set_network_header(skb, exthdrlen);
1149b0e380b1SArnaldo Carvalho de Melo skb->transport_header = (skb->network_header +
1150b0e380b1SArnaldo Carvalho de Melo fragheaderlen);
1151353e5c9aSSteffen Klassert data += fragheaderlen + exthdrlen;
11521da177e4SLinus Torvalds
11531da177e4SLinus Torvalds if (fraggap) {
11541da177e4SLinus Torvalds skb->csum = skb_copy_and_csum_bits(
11551da177e4SLinus Torvalds skb_prev, maxfraglen,
11568d5930dfSAl Viro data + transhdrlen, fraggap);
11571da177e4SLinus Torvalds skb_prev->csum = csum_sub(skb_prev->csum,
11581da177e4SLinus Torvalds skb->csum);
11591da177e4SLinus Torvalds data += fraggap;
1160e9fa4f7bSHerbert Xu pskb_trim_unique(skb_prev, maxfraglen);
11611da177e4SLinus Torvalds }
11621da177e4SLinus Torvalds
116315e36f5bSWillem de Bruijn copy = datalen - transhdrlen - fraggap - pagedlen;
11640f71c9caSDavid Howells /* [!] NOTE: copy will be negative if pagedlen>0
11650f71c9caSDavid Howells * because then the equation reduces to -fraggap.
11660f71c9caSDavid Howells */
11671da177e4SLinus Torvalds if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
11681da177e4SLinus Torvalds err = -EFAULT;
11691da177e4SLinus Torvalds kfree_skb(skb);
11701da177e4SLinus Torvalds goto error;
11710f71c9caSDavid Howells } else if (flags & MSG_SPLICE_PAGES) {
11720f71c9caSDavid Howells copy = 0;
11731da177e4SLinus Torvalds }
11741da177e4SLinus Torvalds
11751da177e4SLinus Torvalds offset += copy;
117615e36f5bSWillem de Bruijn length -= copy + transhdrlen;
11771da177e4SLinus Torvalds transhdrlen = 0;
11781da177e4SLinus Torvalds exthdrlen = 0;
11791da177e4SLinus Torvalds csummode = CHECKSUM_NONE;
11801da177e4SLinus Torvalds
118152900d22SWillem de Bruijn /* only the initial fragment is time stamped */
118252900d22SWillem de Bruijn skb_shinfo(skb)->tx_flags = cork->tx_flags;
118352900d22SWillem de Bruijn cork->tx_flags = 0;
118452900d22SWillem de Bruijn skb_shinfo(skb)->tskey = tskey;
118552900d22SWillem de Bruijn tskey = 0;
118652900d22SWillem de Bruijn skb_zcopy_set(skb, uarg, &extra_uref);
118752900d22SWillem de Bruijn
11880dec879fSJulian Anastasov if ((flags & MSG_CONFIRM) && !skb_prev)
11890dec879fSJulian Anastasov skb_set_dst_pending_confirm(skb, 1);
11900dec879fSJulian Anastasov
11911da177e4SLinus Torvalds /*
11921da177e4SLinus Torvalds * Put the packet on the pending queue.
11931da177e4SLinus Torvalds */
1194694aba69SEric Dumazet if (!skb->destructor) {
1195694aba69SEric Dumazet skb->destructor = sock_wfree;
1196694aba69SEric Dumazet skb->sk = sk;
1197694aba69SEric Dumazet wmem_alloc_delta += skb->truesize;
1198694aba69SEric Dumazet }
11991470ddf7SHerbert Xu __skb_queue_tail(queue, skb);
12001da177e4SLinus Torvalds continue;
12011da177e4SLinus Torvalds }
12021da177e4SLinus Torvalds
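		/* Append to the current skb: non-SG devices copy straight
		 * into the linear tailroom, MSG_SPLICE_PAGES splices the
		 * caller's pages in, the ordinary path copies into the
		 * socket's page_frag allocator, and the zerocopy path
		 * attaches the user's pages directly.
		 */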
12031da177e4SLinus Torvalds if (copy > length)
12041da177e4SLinus Torvalds copy = length;
12051da177e4SLinus Torvalds
1206113f99c3SWillem de Bruijn if (!(rt->dst.dev->features&NETIF_F_SG) &&
1207113f99c3SWillem de Bruijn skb_tailroom(skb) >= copy) {
12081da177e4SLinus Torvalds unsigned int off;
12091da177e4SLinus Torvalds
12101da177e4SLinus Torvalds off = skb->len;
12111da177e4SLinus Torvalds if (getfrag(from, skb_put(skb, copy),
12121da177e4SLinus Torvalds offset, copy, off, skb) < 0) {
12131da177e4SLinus Torvalds __skb_trim(skb, off);
12141da177e4SLinus Torvalds err = -EFAULT;
12151da177e4SLinus Torvalds goto error;
12161da177e4SLinus Torvalds }
12177da0dde6SDavid Howells } else if (flags & MSG_SPLICE_PAGES) {
12187da0dde6SDavid Howells struct msghdr *msg = from;
12197da0dde6SDavid Howells
12200f71c9caSDavid Howells err = -EIO;
12210f71c9caSDavid Howells if (WARN_ON_ONCE(copy > msg->msg_iter.count))
12220f71c9caSDavid Howells goto error;
12230f71c9caSDavid Howells
12247da0dde6SDavid Howells err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
12257da0dde6SDavid Howells sk->sk_allocation);
12267da0dde6SDavid Howells if (err < 0)
12277da0dde6SDavid Howells goto error;
12287da0dde6SDavid Howells copy = err;
12297da0dde6SDavid Howells wmem_alloc_delta += copy;
1230c445f31bSPavel Begunkov } else if (!zc) {
12311da177e4SLinus Torvalds int i = skb_shinfo(skb)->nr_frags;
12321da177e4SLinus Torvalds
12331da177e4SLinus Torvalds err = -ENOMEM;
12345640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag))
12351da177e4SLinus Torvalds goto error;
12361da177e4SLinus Torvalds
1237c445f31bSPavel Begunkov skb_zcopy_downgrade_managed(skb);
12385640f768SEric Dumazet if (!skb_can_coalesce(skb, i, pfrag->page,
12395640f768SEric Dumazet pfrag->offset)) {
12401da177e4SLinus Torvalds err = -EMSGSIZE;
12415640f768SEric Dumazet if (i == MAX_SKB_FRAGS)
12421da177e4SLinus Torvalds goto error;
12435640f768SEric Dumazet
12445640f768SEric Dumazet __skb_fill_page_desc(skb, i, pfrag->page,
12455640f768SEric Dumazet pfrag->offset, 0);
12465640f768SEric Dumazet skb_shinfo(skb)->nr_frags = ++i;
12475640f768SEric Dumazet get_page(pfrag->page);
12481da177e4SLinus Torvalds }
12495640f768SEric Dumazet copy = min_t(int, copy, pfrag->size - pfrag->offset);
12505640f768SEric Dumazet if (getfrag(from,
12515640f768SEric Dumazet page_address(pfrag->page) + pfrag->offset,
12525640f768SEric Dumazet offset, copy, skb->len, skb) < 0)
12535640f768SEric Dumazet goto error_efault;
12545640f768SEric Dumazet
12555640f768SEric Dumazet pfrag->offset += copy;
12565640f768SEric Dumazet skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1257ede57d58SRichard Gobert skb_len_add(skb, copy);
1258694aba69SEric Dumazet wmem_alloc_delta += copy;
1259b5947e5dSWillem de Bruijn } else {
1260b5947e5dSWillem de Bruijn err = skb_zerocopy_iter_dgram(skb, from, copy);
1261b5947e5dSWillem de Bruijn if (err < 0)
1262b5947e5dSWillem de Bruijn goto error;
12631da177e4SLinus Torvalds }
12641da177e4SLinus Torvalds offset += copy;
12651da177e4SLinus Torvalds length -= copy;
12661da177e4SLinus Torvalds }
12671da177e4SLinus Torvalds
12689e8445a5SPaolo Abeni if (wmem_alloc_delta)
1269694aba69SEric Dumazet refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
12701da177e4SLinus Torvalds return 0;
12711da177e4SLinus Torvalds
12725640f768SEric Dumazet error_efault:
12735640f768SEric Dumazet err = -EFAULT;
12741da177e4SLinus Torvalds error:
12758e044917SJonathan Lemon net_zcopy_put_abort(uarg, extra_uref);
12761470ddf7SHerbert Xu cork->length -= length;
12775e38e270SPavel Emelyanov IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1278694aba69SEric Dumazet refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1279488b6d91SVadim Fedorenko if (hold_tskey)
1280488b6d91SVadim Fedorenko atomic_dec(&sk->sk_tskey);
12811da177e4SLinus Torvalds return err;
12821da177e4SLinus Torvalds }
12831da177e4SLinus Torvalds
12841470ddf7SHerbert Xu static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
12851470ddf7SHerbert Xu struct ipcm_cookie *ipc, struct rtable **rtp)
12861470ddf7SHerbert Xu {
1287f6d8bd05SEric Dumazet struct ip_options_rcu *opt;
12881470ddf7SHerbert Xu struct rtable *rt;
12891470ddf7SHerbert Xu
12909783ccd0SGao Feng rt = *rtp;
12919783ccd0SGao Feng if (unlikely(!rt))
12929783ccd0SGao Feng return -EFAULT;
12939783ccd0SGao Feng
12945dee6d69SZhipeng Lu cork->fragsize = ip_sk_use_pmtu(sk) ?
12955dee6d69SZhipeng Lu dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
12965dee6d69SZhipeng Lu
12975dee6d69SZhipeng Lu if (!inetdev_valid_mtu(cork->fragsize))
12985dee6d69SZhipeng Lu return -ENETUNREACH;
12995dee6d69SZhipeng Lu
13001470ddf7SHerbert Xu /*
13011470ddf7SHerbert Xu * setup for corking.
13021470ddf7SHerbert Xu */
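	/* The options are copied into the cork (sized for the 40 byte IPv4
	 * options maximum) so that they remain valid for later appends even
	 * after ipc->opt is gone.
	 */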
13031470ddf7SHerbert Xu opt = ipc->opt;
13041470ddf7SHerbert Xu if (opt) {
130551456b29SIan Morris if (!cork->opt) {
13061470ddf7SHerbert Xu cork->opt = kmalloc(sizeof(struct ip_options) + 40,
13071470ddf7SHerbert Xu sk->sk_allocation);
130851456b29SIan Morris if (unlikely(!cork->opt))
13091470ddf7SHerbert Xu return -ENOBUFS;
13101470ddf7SHerbert Xu }
1311f6d8bd05SEric Dumazet memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
13121470ddf7SHerbert Xu cork->flags |= IPCORK_OPT;
13131470ddf7SHerbert Xu cork->addr = ipc->addr;
13141470ddf7SHerbert Xu }
13159783ccd0SGao Feng
1316fbf47813SWillem de Bruijn cork->gso_size = ipc->gso_size;
1317501a90c9SEric Dumazet
13181470ddf7SHerbert Xu cork->dst = &rt->dst;
1319501a90c9SEric Dumazet /* We stole this route, caller should not release it. */
1320501a90c9SEric Dumazet *rtp = NULL;
1321501a90c9SEric Dumazet
13221470ddf7SHerbert Xu cork->length = 0;
1323aa661581SFrancesco Fusco cork->ttl = ipc->ttl;
1324aa661581SFrancesco Fusco cork->tos = ipc->tos;
1325c6af0c22SWillem de Bruijn cork->mark = ipc->sockc.mark;
1326aa661581SFrancesco Fusco cork->priority = ipc->priority;
1327bc969a97SJesus Sanchez-Palencia cork->transmit_time = ipc->sockc.transmit_time;
1328678ca42dSWillem de Bruijn cork->tx_flags = 0;
1329678ca42dSWillem de Bruijn sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
13301470ddf7SHerbert Xu
13311470ddf7SHerbert Xu return 0;
13321470ddf7SHerbert Xu }
13331470ddf7SHerbert Xu
13341470ddf7SHerbert Xu /*
1335c49cf266SDavid Howells * ip_append_data() can make one large IP datagram from many pieces of
1336c49cf266SDavid Howells * data. Each piece will be held on the socket until
1337c49cf266SDavid Howells * ip_push_pending_frames() is called. Each piece can be a page or
1338c49cf266SDavid Howells * non-page data.
13391470ddf7SHerbert Xu *
13401470ddf7SHerbert Xu * Not only UDP but also other transport protocols - e.g. raw sockets -
13411470ddf7SHerbert Xu * can potentially use this interface.
13421470ddf7SHerbert Xu *
13431470ddf7SHerbert Xu * LATER: length must be adjusted by pad at tail, when it is required.
13441470ddf7SHerbert Xu */
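/*
 * Illustrative sketch only (simplified from real callers such as
 * udp_sendmsg()): a corked datagram sender typically drives this
 * interface roughly as follows:
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len,
 *			     sizeof(struct udphdr), ipc, rtp,
 *			     msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, fl4);
 *	release_sock(sk);
 */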
1345f5fca608SDavid S. Miller int ip_append_data(struct sock *sk, struct flowi4 *fl4,
13461470ddf7SHerbert Xu int getfrag(void *from, char *to, int offset, int len,
13471470ddf7SHerbert Xu int odd, struct sk_buff *skb),
13481470ddf7SHerbert Xu void *from, int length, int transhdrlen,
13491470ddf7SHerbert Xu struct ipcm_cookie *ipc, struct rtable **rtp,
13501470ddf7SHerbert Xu unsigned int flags)
13511470ddf7SHerbert Xu {
13521470ddf7SHerbert Xu struct inet_sock *inet = inet_sk(sk);
13531470ddf7SHerbert Xu int err;
13541470ddf7SHerbert Xu
13551470ddf7SHerbert Xu if (flags&MSG_PROBE)
13561470ddf7SHerbert Xu return 0;
13571470ddf7SHerbert Xu
13581470ddf7SHerbert Xu if (skb_queue_empty(&sk->sk_write_queue)) {
1359bdc712b4SDavid S. Miller err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
13601470ddf7SHerbert Xu if (err)
13611470ddf7SHerbert Xu return err;
13621470ddf7SHerbert Xu } else {
13631470ddf7SHerbert Xu transhdrlen = 0;
13641470ddf7SHerbert Xu }
13651470ddf7SHerbert Xu
13665640f768SEric Dumazet return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
13675640f768SEric Dumazet sk_page_frag(sk), getfrag,
13681470ddf7SHerbert Xu from, length, transhdrlen, flags);
13691470ddf7SHerbert Xu }
13701470ddf7SHerbert Xu
13711470ddf7SHerbert Xu static void ip_cork_release(struct inet_cork *cork)
1372429f08e9SPavel Emelyanov {
13731470ddf7SHerbert Xu cork->flags &= ~IPCORK_OPT;
13741470ddf7SHerbert Xu kfree(cork->opt);
13751470ddf7SHerbert Xu cork->opt = NULL;
13761470ddf7SHerbert Xu dst_release(cork->dst);
13771470ddf7SHerbert Xu cork->dst = NULL;
1378429f08e9SPavel Emelyanov }
1379429f08e9SPavel Emelyanov
13801da177e4SLinus Torvalds /*
13811da177e4SLinus Torvalds * Combined all pending IP fragments on the socket as one IP datagram
13821da177e4SLinus Torvalds * and push them out.
13831da177e4SLinus Torvalds */
13841c32c5adSHerbert Xu struct sk_buff *__ip_make_skb(struct sock *sk,
138577968b78SDavid S. Miller struct flowi4 *fl4,
13861470ddf7SHerbert Xu struct sk_buff_head *queue,
13871470ddf7SHerbert Xu struct inet_cork *cork)
13881da177e4SLinus Torvalds {
13891da177e4SLinus Torvalds struct sk_buff *skb, *tmp_skb;
13901da177e4SLinus Torvalds struct sk_buff **tail_skb;
13911da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk);
13920388b004SPavel Emelyanov struct net *net = sock_net(sk);
13931da177e4SLinus Torvalds struct ip_options *opt = NULL;
139405d6d492SEric Dumazet struct rtable *rt = dst_rtable(cork->dst);
13951da177e4SLinus Torvalds struct iphdr *iph;
1396ceaa7141SEric Dumazet u8 pmtudisc, ttl;
139776ab608dSAlexey Dobriyan __be16 df = 0;
13981da177e4SLinus Torvalds
139951456b29SIan Morris skb = __skb_dequeue(queue);
140051456b29SIan Morris if (!skb)
14011da177e4SLinus Torvalds goto out;
14021da177e4SLinus Torvalds tail_skb = &(skb_shinfo(skb)->frag_list);
14031da177e4SLinus Torvalds
14041da177e4SLinus Torvalds /* move skb->data to ip header from ext header */
1405d56f90a7SArnaldo Carvalho de Melo if (skb->data < skb_network_header(skb))
1406bbe735e4SArnaldo Carvalho de Melo __skb_pull(skb, skb_network_offset(skb));
14071470ddf7SHerbert Xu while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1408cfe1fc77SArnaldo Carvalho de Melo __skb_pull(tmp_skb, skb_network_header_len(skb));
14091da177e4SLinus Torvalds *tail_skb = tmp_skb;
14101da177e4SLinus Torvalds tail_skb = &(tmp_skb->next);
14111da177e4SLinus Torvalds skb->len += tmp_skb->len;
14121da177e4SLinus Torvalds skb->data_len += tmp_skb->len;
14131da177e4SLinus Torvalds skb->truesize += tmp_skb->truesize;
14141da177e4SLinus Torvalds tmp_skb->destructor = NULL;
14151da177e4SLinus Torvalds tmp_skb->sk = NULL;
14161da177e4SLinus Torvalds }
14171da177e4SLinus Torvalds
14181da177e4SLinus Torvalds /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
14191da177e4SLinus Torvalds * the frame generated here to be fragmented. No matter what transforms
14201da177e4SLinus Torvalds * are applied or how they change the size of the packet, it will come out.
14211da177e4SLinus Torvalds */
142260ff7467SWANG Cong skb->ignore_df = ip_sk_ignore_df(sk);
14231da177e4SLinus Torvalds
14241da177e4SLinus Torvalds /* DF bit is set when we want to see DF on outgoing frames.
142560ff7467SWANG Cong * If ignore_df is set too, we still allow this frame to be
14261da177e4SLinus Torvalds * fragmented locally. */
1427ceaa7141SEric Dumazet pmtudisc = READ_ONCE(inet->pmtudisc);
1428ceaa7141SEric Dumazet if (pmtudisc == IP_PMTUDISC_DO ||
1429ceaa7141SEric Dumazet pmtudisc == IP_PMTUDISC_PROBE ||
1430d8d1f30bSChangli Gao (skb->len <= dst_mtu(&rt->dst) &&
1431d8d1f30bSChangli Gao ip_dont_fragment(sk, &rt->dst)))
14321da177e4SLinus Torvalds df = htons(IP_DF);
14331da177e4SLinus Torvalds
14341470ddf7SHerbert Xu if (cork->flags & IPCORK_OPT)
14351470ddf7SHerbert Xu opt = cork->opt;
14361da177e4SLinus Torvalds
1437aa661581SFrancesco Fusco if (cork->ttl != 0)
1438aa661581SFrancesco Fusco ttl = cork->ttl;
1439aa661581SFrancesco Fusco else if (rt->rt_type == RTN_MULTICAST)
1440c9746e6aSEric Dumazet ttl = READ_ONCE(inet->mc_ttl);
14411da177e4SLinus Torvalds else
1442d8d1f30bSChangli Gao ttl = ip_select_ttl(inet, &rt->dst);
14431da177e4SLinus Torvalds
1444749154aaSAnsis Atteka iph = ip_hdr(skb);
14451da177e4SLinus Torvalds iph->version = 4;
14461da177e4SLinus Torvalds iph->ihl = 5;
1447e08d0b3dSEric Dumazet iph->tos = (cork->tos != -1) ? cork->tos : READ_ONCE(inet->tos);
14481da177e4SLinus Torvalds iph->frag_off = df;
14491da177e4SLinus Torvalds iph->ttl = ttl;
14501da177e4SLinus Torvalds iph->protocol = sk->sk_protocol;
145184f9307cSEric Dumazet ip_copy_addrs(iph, fl4);
1452b6a7719aSHannes Frederic Sowa ip_select_ident(net, skb, sk);
14531da177e4SLinus Torvalds
145422f728f8SDavid S. Miller if (opt) {
145522f728f8SDavid S. Miller iph->ihl += opt->optlen >> 2;
14564f0e3040SJakub Kicinski ip_options_build(skb, opt, cork->addr, rt);
145722f728f8SDavid S. Miller }
145822f728f8SDavid S. Miller
145910bbf165SEric Dumazet skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority);
1460c6af0c22SWillem de Bruijn skb->mark = cork->mark;
14611693c5dbSAbhishek Chauhan if (sk_is_tcp(sk))
14621693c5dbSAbhishek Chauhan skb_set_delivery_time(skb, cork->transmit_time, SKB_CLOCK_MONOTONIC);
14631693c5dbSAbhishek Chauhan else
14641693c5dbSAbhishek Chauhan skb_set_delivery_type_by_clockid(skb, cork->transmit_time, sk->sk_clockid);
1465a21bba94SEric Dumazet /*
1466a21bba94SEric Dumazet * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1467a21bba94SEric Dumazet * on dst refcount
1468a21bba94SEric Dumazet */
14691470ddf7SHerbert Xu cork->dst = NULL;
1470d8d1f30bSChangli Gao skb_dst_set(skb, &rt->dst);
14711da177e4SLinus Torvalds
147299e5acaeSZiyang Xuan if (iph->protocol == IPPROTO_ICMP) {
147399e5acaeSZiyang Xuan u8 icmp_type;
147499e5acaeSZiyang Xuan
147599e5acaeSZiyang Xuan /* For such sockets, transhdrlen is zero when ip_append_data() is called,
147699e5acaeSZiyang Xuan * so the icmphdr is not in the skb linear region and icmp_type cannot
147799e5acaeSZiyang Xuan * be read via icmp_hdr(skb)->type.
147899e5acaeSZiyang Xuan */
1479cafbe182SEric Dumazet if (sk->sk_type == SOCK_RAW &&
1480fc1092f5SShigeru Yoshida !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
148199e5acaeSZiyang Xuan icmp_type = fl4->fl4_icmp_type;
148299e5acaeSZiyang Xuan else
148399e5acaeSZiyang Xuan icmp_type = icmp_hdr(skb)->type;
148499e5acaeSZiyang Xuan icmp_out_count(net, icmp_type);
148599e5acaeSZiyang Xuan }
148696793b48SDavid L Stevens
14871c32c5adSHerbert Xu ip_cork_release(cork);
14881c32c5adSHerbert Xu out:
14891c32c5adSHerbert Xu return skb;
14901c32c5adSHerbert Xu }
14911c32c5adSHerbert Xu
1492b5ec8eeaSEric Dumazet int ip_send_skb(struct net *net, struct sk_buff *skb)
14931c32c5adSHerbert Xu {
14941c32c5adSHerbert Xu int err;
14951c32c5adSHerbert Xu
149633224b16SEric W. Biederman err = ip_local_out(net, skb->sk, skb);
14971da177e4SLinus Torvalds if (err) {
14981da177e4SLinus Torvalds if (err > 0)
14996ce9e7b5SEric Dumazet err = net_xmit_errno(err);
15001da177e4SLinus Torvalds if (err)
15011c32c5adSHerbert Xu IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
15021da177e4SLinus Torvalds }
15031da177e4SLinus Torvalds
15041da177e4SLinus Torvalds return err;
15051da177e4SLinus Torvalds }
15061da177e4SLinus Torvalds
150877968b78SDavid S. Miller int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
15081470ddf7SHerbert Xu {
15091c32c5adSHerbert Xu struct sk_buff *skb;
15101c32c5adSHerbert Xu
151177968b78SDavid S. Miller skb = ip_finish_skb(sk, fl4);
15121c32c5adSHerbert Xu if (!skb)
15131c32c5adSHerbert Xu return 0;
15141c32c5adSHerbert Xu
15151c32c5adSHerbert Xu /* Netfilter gets the whole, unfragmented skb. */
1516b5ec8eeaSEric Dumazet return ip_send_skb(sock_net(sk), skb);
15171470ddf7SHerbert Xu }
15181470ddf7SHerbert Xu
15191da177e4SLinus Torvalds /*
15201da177e4SLinus Torvalds * Throw away all pending data on the socket.
15211da177e4SLinus Torvalds */
15221470ddf7SHerbert Xu static void __ip_flush_pending_frames(struct sock *sk,
15231470ddf7SHerbert Xu struct sk_buff_head *queue,
15241470ddf7SHerbert Xu struct inet_cork *cork)
15251da177e4SLinus Torvalds {
15261da177e4SLinus Torvalds struct sk_buff *skb;
15271da177e4SLinus Torvalds
15281470ddf7SHerbert Xu while ((skb = __skb_dequeue_tail(queue)) != NULL)
15291da177e4SLinus Torvalds kfree_skb(skb);
15301da177e4SLinus Torvalds
15311470ddf7SHerbert Xu ip_cork_release(cork);
15321470ddf7SHerbert Xu }
15331470ddf7SHerbert Xu
15341470ddf7SHerbert Xu void ip_flush_pending_frames(struct sock *sk)
15351470ddf7SHerbert Xu {
1536bdc712b4SDavid S. Miller __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
15371da177e4SLinus Torvalds }
15381da177e4SLinus Torvalds
15391c32c5adSHerbert Xu struct sk_buff *ip_make_skb(struct sock *sk,
154077968b78SDavid S. Miller struct flowi4 *fl4,
15411c32c5adSHerbert Xu int getfrag(void *from, char *to, int offset,
15421c32c5adSHerbert Xu int len, int odd, struct sk_buff *skb),
15431c32c5adSHerbert Xu void *from, int length, int transhdrlen,
15441c32c5adSHerbert Xu struct ipcm_cookie *ipc, struct rtable **rtp,
15451cd7884dSWillem de Bruijn struct inet_cork *cork, unsigned int flags)
15461c32c5adSHerbert Xu {
15471c32c5adSHerbert Xu struct sk_buff_head queue;
15481c32c5adSHerbert Xu int err;
15491c32c5adSHerbert Xu
15501c32c5adSHerbert Xu if (flags & MSG_PROBE)
15511c32c5adSHerbert Xu return NULL;
15521c32c5adSHerbert Xu
15531c32c5adSHerbert Xu __skb_queue_head_init(&queue);
15541c32c5adSHerbert Xu
15551cd7884dSWillem de Bruijn cork->flags = 0;
15561cd7884dSWillem de Bruijn cork->addr = 0;
15571cd7884dSWillem de Bruijn cork->opt = NULL;
15581cd7884dSWillem de Bruijn err = ip_setup_cork(sk, cork, ipc, rtp);
15591c32c5adSHerbert Xu if (err)
15601c32c5adSHerbert Xu return ERR_PTR(err);
15611c32c5adSHerbert Xu
15621cd7884dSWillem de Bruijn err = __ip_append_data(sk, fl4, &queue, cork,
15635640f768SEric Dumazet &current->task_frag, getfrag,
15641c32c5adSHerbert Xu from, length, transhdrlen, flags);
15651c32c5adSHerbert Xu if (err) {
15661cd7884dSWillem de Bruijn __ip_flush_pending_frames(sk, &queue, cork);
15671c32c5adSHerbert Xu return ERR_PTR(err);
15681c32c5adSHerbert Xu }
15691c32c5adSHerbert Xu
15701cd7884dSWillem de Bruijn return __ip_make_skb(sk, fl4, &queue, cork);
15711c32c5adSHerbert Xu }
15721da177e4SLinus Torvalds
15731da177e4SLinus Torvalds /*
15741da177e4SLinus Torvalds * Fetch data from kernel space and fill in checksum if needed.
15751da177e4SLinus Torvalds */
15761da177e4SLinus Torvalds static int ip_reply_glue_bits(void *dptr, char *to, int offset,
15771da177e4SLinus Torvalds int len, int odd, struct sk_buff *skb)
15781da177e4SLinus Torvalds {
15795084205fSAl Viro __wsum csum;
15801da177e4SLinus Torvalds
1581cc44c17bSAl Viro csum = csum_partial_copy_nocheck(dptr+offset, to, len);
15821da177e4SLinus Torvalds skb->csum = csum_block_add(skb->csum, csum, odd);
15831da177e4SLinus Torvalds return 0;
15841da177e4SLinus Torvalds }
15851da177e4SLinus Torvalds
15861da177e4SLinus Torvalds /*
15871da177e4SLinus Torvalds * Generic function to send a packet as reply to another packet.
1588be9f4a44SEric Dumazet * Used to send some TCP resets/acks so far.
15891da177e4SLinus Torvalds */
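/* Typically invoked on a dedicated control socket (e.g. from
 * tcp_v4_send_reset() and tcp_v4_send_ack()): the reply route is looked up
 * from the request's addresses, relevant IP options are echoed back, and
 * the frame is pushed out immediately.
 */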
1590bdbbb852SEric Dumazet void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
159124a2d43dSEric Dumazet const struct ip_options *sopt,
159224a2d43dSEric Dumazet __be32 daddr, __be32 saddr,
159324a2d43dSEric Dumazet const struct ip_reply_arg *arg,
1594c0a8966eSAntoine Tenart unsigned int len, u64 transmit_time, u32 txhash)
15951da177e4SLinus Torvalds {
1596f6d8bd05SEric Dumazet struct ip_options_data replyopts;
15971da177e4SLinus Torvalds struct ipcm_cookie ipc;
159877968b78SDavid S. Miller struct flowi4 fl4;
1599511c3f92SEric Dumazet struct rtable *rt = skb_rtable(skb);
1600bdbbb852SEric Dumazet struct net *net = sock_net(sk);
1601be9f4a44SEric Dumazet struct sk_buff *nskb;
16024062090eSVasily Averin int err;
1603f7ba868bSDavid Ahern int oif;
16041da177e4SLinus Torvalds
160591ed1e66SPaolo Abeni if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
16061da177e4SLinus Torvalds return;
16071da177e4SLinus Torvalds
160835178206SWillem de Bruijn ipcm_init(&ipc);
16090a5ebb80SDavid S. Miller ipc.addr = daddr;
1610d6fb396cSEric Dumazet ipc.sockc.transmit_time = transmit_time;
16111da177e4SLinus Torvalds
1612f6d8bd05SEric Dumazet if (replyopts.opt.opt.optlen) {
16131da177e4SLinus Torvalds ipc.opt = &replyopts.opt;
16141da177e4SLinus Torvalds
1615f6d8bd05SEric Dumazet if (replyopts.opt.opt.srr)
1616f6d8bd05SEric Dumazet daddr = replyopts.opt.opt.faddr;
16171da177e4SLinus Torvalds }
16181da177e4SLinus Torvalds
1619f7ba868bSDavid Ahern oif = arg->bound_dev_if;
16209b6c14d5SDavid Ahern if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
16219b6c14d5SDavid Ahern oif = skb->skb_iif;
1622f7ba868bSDavid Ahern
1623f7ba868bSDavid Ahern flowi4_init_output(&fl4, oif,
162400483690SJon Maxwell IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
162513f6538dSIdo Schimmel arg->tos & INET_DSCP_MASK,
1626be9f4a44SEric Dumazet RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
1627538de0e0SDavid S. Miller ip_reply_arg_flowi_flags(arg),
162870e73416SDavid S. Miller daddr, saddr,
1629e2d118a1SLorenzo Colitti tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1630e2d118a1SLorenzo Colitti arg->uid);
16313df98d79SPaul Moore security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
1632e22aa148Ssewookseo rt = ip_route_output_flow(net, &fl4, sk);
1633b23dd4feSDavid S. Miller if (IS_ERR(rt))
16341da177e4SLinus Torvalds return;
16351da177e4SLinus Torvalds
1636ba9e04a7SWei Wang inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
16371da177e4SLinus Torvalds
1638eddc9ec5SArnaldo Carvalho de Melo sk->sk_protocol = ip_hdr(skb)->protocol;
1639f0e48dbfSPatrick McHardy sk->sk_bound_dev_if = arg->bound_dev_if;
16401227c177SKuniyuki Iwashima sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
16410da7536fSWillem de Bruijn ipc.sockc.mark = fl4.flowi4_mark;
16424062090eSVasily Averin err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
16434062090eSVasily Averin len, 0, &ipc, &rt, MSG_DONTWAIT);
16444062090eSVasily Averin if (unlikely(err)) {
16454062090eSVasily Averin ip_flush_pending_frames(sk);
16464062090eSVasily Averin goto out;
16474062090eSVasily Averin }
16484062090eSVasily Averin
1649be9f4a44SEric Dumazet nskb = skb_peek(&sk->sk_write_queue);
1650be9f4a44SEric Dumazet if (nskb) {
16511da177e4SLinus Torvalds if (arg->csumoffset >= 0)
1652be9f4a44SEric Dumazet *((__sum16 *)skb_transport_header(nskb) +
1653be9f4a44SEric Dumazet arg->csumoffset) = csum_fold(csum_add(nskb->csum,
16549c70220bSArnaldo Carvalho de Melo arg->csum));
1655be9f4a44SEric Dumazet nskb->ip_summed = CHECKSUM_NONE;
16564d25ca2dSAbhishek Chauhan if (transmit_time)
16574d25ca2dSAbhishek Chauhan nskb->tstamp_type = SKB_CLOCK_MONOTONIC;
1658c0a8966eSAntoine Tenart if (txhash)
1659c0a8966eSAntoine Tenart skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
166077968b78SDavid S. Miller ip_push_pending_frames(sk, &fl4);
16611da177e4SLinus Torvalds }
16624062090eSVasily Averin out:
16631da177e4SLinus Torvalds ip_rt_put(rt);
16641da177e4SLinus Torvalds }
16651da177e4SLinus Torvalds
16661da177e4SLinus Torvalds void __init ip_init(void)
16671da177e4SLinus Torvalds {
16681da177e4SLinus Torvalds ip_rt_init();
16691da177e4SLinus Torvalds inet_initpeers();
16701da177e4SLinus Torvalds
167172c1d3bdSWANG Cong #if defined(CONFIG_IP_MULTICAST)
167272c1d3bdSWANG Cong igmp_mc_init();
16731da177e4SLinus Torvalds #endif
16741da177e4SLinus Torvalds }
1675