1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * The Internet Protocol (IP) output module.
8 *
9 * Authors: Ross Biro
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Donald Becker, <becker@super.org>
12 * Alan Cox, <Alan.Cox@linux.org>
13 * Richard Underwood
14 * Stefan Becker, <stefanb@yello.ping.de>
15 * Jorge Cwik, <jorge@laser.satlink.net>
16 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
17 * Hirokazu Takahashi, <taka@valinux.co.jp>
18 *
19 * See ip_input.c for original log
20 *
21 * Fixes:
22 * Alan Cox : Missing nonblock feature in ip_build_xmit.
23 * Mike Kilburn : htons() missing in ip_build_xmit.
24 * Bradford Johnson: Fix faulty handling of some frames when
25 * no route is found.
26 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
27 * (in case if packet not accepted by
28 * output firewall rules)
29 * Mike McLagan : Routing by source
30 * Alexey Kuznetsov: use new route cache
31 * Andi Kleen: Fix broken PMTU recovery and remove
32 * some redundant tests.
33 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
34 * Andi Kleen : Replace ip_reply with ip_send_reply.
35 * Andi Kleen : Split fast and slow ip_build_xmit path
36 * for decreased register pressure on x86
37 * and more readability.
38 * Marc Boucher : When call_out_firewall returns FW_QUEUE,
39 * silently drop skb instead of failing with -EPERM.
40 * Detlev Wengorz : Copy protocol for fragments.
41 * Hirokazu Takahashi: HW checksumming for outgoing UDP
42 * datagrams.
43 * Hirokazu Takahashi: sendfile() on UDP works now.
44 */
45
46 #include <linux/uaccess.h>
47 #include <linux/module.h>
48 #include <linux/types.h>
49 #include <linux/kernel.h>
50 #include <linux/mm.h>
51 #include <linux/string.h>
52 #include <linux/errno.h>
53 #include <linux/highmem.h>
54 #include <linux/slab.h>
55
56 #include <linux/socket.h>
57 #include <linux/sockios.h>
58 #include <linux/in.h>
59 #include <linux/inet.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/proc_fs.h>
63 #include <linux/stat.h>
64 #include <linux/init.h>
65
66 #include <net/snmp.h>
67 #include <net/ip.h>
68 #include <net/protocol.h>
69 #include <net/route.h>
70 #include <net/xfrm.h>
71 #include <linux/skbuff.h>
72 #include <net/sock.h>
73 #include <net/arp.h>
74 #include <net/icmp.h>
75 #include <net/checksum.h>
76 #include <net/gso.h>
77 #include <net/inetpeer.h>
78 #include <net/lwtunnel.h>
79 #include <net/inet_dscp.h>
80 #include <linux/bpf-cgroup.h>
81 #include <linux/igmp.h>
82 #include <linux/netfilter_ipv4.h>
83 #include <linux/netfilter_bridge.h>
84 #include <linux/netlink.h>
85 #include <linux/tcp.h>
86
87 static int
88 ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
89 unsigned int mtu,
90 int (*output)(struct net *, struct sock *, struct sk_buff *));
91
92 /* Generate a checksum for an outgoing IP datagram. */
93 void ip_send_check(struct iphdr *iph)
94 {
95 iph->check = 0;
96 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
97 }
98 EXPORT_SYMBOL(ip_send_check);
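/* Illustrative note (editorial, not from the original file): any code that
 * modifies header fields afterwards must recompute the checksum, e.g. a
 * forwarding step that decrements the TTL would in effect do:
 *
 *	iph->ttl--;
 *	ip_send_check(iph);
 *
 * (the real forwarding path uses ip_decrease_ttl(), which patches the
 * checksum incrementally instead of recomputing it).
 */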
99
100 int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
101 {
102 struct iphdr *iph = ip_hdr(skb);
103
104 IP_INC_STATS(net, IPSTATS_MIB_OUTREQUESTS);
105
106 iph_set_totlen(iph, skb->len);
107 ip_send_check(iph);
108
109 /* if egress device is enslaved to an L3 master device pass the
110 * skb to its handler for processing
111 */
112 skb = l3mdev_ip_out(sk, skb);
113 if (unlikely(!skb))
114 return 0;
115
116 skb->protocol = htons(ETH_P_IP);
117
118 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
119 net, sk, skb, NULL, skb_dst_dev(skb),
120 dst_output);
121 }
122
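/* Editorial note: nf_hook() returns 1 when the NF_INET_LOCAL_OUT hook
 * accepts the packet without stealing or queueing it; the caller must then
 * continue transmission itself, which is why ip_local_out() treats
 * err == 1 as "proceed to dst_output()".
 */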
123 int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
124 {
125 int err;
126
127 err = __ip_local_out(net, sk, skb);
128 if (likely(err == 1))
129 err = dst_output(net, sk, skb);
130
131 return err;
132 }
133 EXPORT_SYMBOL_GPL(ip_local_out);
134
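/* Editorial note: uc_ttl is the per-socket unicast TTL set via the IP_TTL
 * socket option; a negative value means "unset", in which case the route's
 * default hop limit is used.
 */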
135 static inline int ip_select_ttl(const struct inet_sock *inet,
136 const struct dst_entry *dst)
137 {
138 int ttl = READ_ONCE(inet->uc_ttl);
139
140 if (ttl < 0)
141 ttl = ip4_dst_hoplimit(dst);
142 return ttl;
143 }
144
145 /*
146 * Add an IP header to a skbuff and send it out.
147 */
149 int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
150 __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
151 u8 tos)
152 {
153 const struct inet_sock *inet = inet_sk(sk);
154 struct rtable *rt = skb_rtable(skb);
155 struct net *net = sock_net(sk);
156 struct iphdr *iph;
157
158 /* Build the IP header. */
159 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
160 skb_reset_network_header(skb);
161 iph = ip_hdr(skb);
162 iph->version = 4;
163 iph->ihl = 5;
164 iph->tos = tos;
165 iph->ttl = ip_select_ttl(inet, &rt->dst);
166 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
167 iph->saddr = saddr;
168 iph->protocol = sk->sk_protocol;
169 /* Do not bother generating IPID for small packets (eg SYNACK) */
170 if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
171 iph->frag_off = htons(IP_DF);
172 iph->id = 0;
173 } else {
174 iph->frag_off = 0;
175 /* TCP packets here are SYNACK with fat IPv4/TCP options.
176 * Avoid using the hashed IP ident generator.
177 */
178 if (sk->sk_protocol == IPPROTO_TCP)
179 iph->id = (__force __be16)get_random_u16();
180 else
181 __ip_select_ident(net, iph, 1);
182 }
183
184 if (opt && opt->opt.optlen) {
185 iph->ihl += opt->opt.optlen>>2;
186 ip_options_build(skb, &opt->opt, daddr, rt);
187 }
188
189 skb->priority = READ_ONCE(sk->sk_priority);
190 if (!skb->mark)
191 skb->mark = READ_ONCE(sk->sk_mark);
192
193 /* Send it out. */
194 return ip_local_out(net, skb->sk, skb);
195 }
196 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
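/* Illustrative usage sketch (an assumption, not taken from this file):
 * a TCP listener answering with a SYNACK typically builds the skb,
 * attaches the route, and then calls something like:
 *
 *	err = ip_build_and_send_pkt(skb, sk, local_addr, remote_addr,
 *				    opt, tos);
 *
 * where local_addr/remote_addr/opt stand for the request-socket state.
 */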
197
198 static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
199 {
200 struct dst_entry *dst = skb_dst(skb);
201 struct rtable *rt = dst_rtable(dst);
202 struct net_device *dev = dst_dev(dst);
203 unsigned int hh_len = LL_RESERVED_SPACE(dev);
204 struct neighbour *neigh;
205 bool is_v6gw = false;
206
207 if (rt->rt_type == RTN_MULTICAST) {
208 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
209 } else if (rt->rt_type == RTN_BROADCAST)
210 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
211
212 /* OUTOCTETS should be counted after fragment */
213 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
214
215 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
216 skb = skb_expand_head(skb, hh_len);
217 if (!skb)
218 return -ENOMEM;
219 }
220
221 if (lwtunnel_xmit_redirect(dst->lwtstate)) {
222 int res = lwtunnel_xmit(skb);
223
224 if (res != LWTUNNEL_XMIT_CONTINUE)
225 return res;
226 }
227
228 rcu_read_lock();
229 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
230 if (!IS_ERR(neigh)) {
231 int res;
232
233 sock_confirm_neigh(skb, neigh);
234 /* if crossing protocols, can not use the cached header */
235 res = neigh_output(neigh, skb, is_v6gw);
236 rcu_read_unlock();
237 return res;
238 }
239 rcu_read_unlock();
240
241 net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
242 __func__);
243 kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
244 return PTR_ERR(neigh);
245 }
246
247 static int ip_finish_output_gso(struct net *net, struct sock *sk,
248 struct sk_buff *skb, unsigned int mtu)
249 {
250 struct sk_buff *segs, *nskb;
251 netdev_features_t features;
252 int ret = 0;
253
254 /* common case: seglen is <= mtu
255 */
256 if (skb_gso_validate_network_len(skb, mtu))
257 return ip_finish_output2(net, sk, skb);
258
259 /* Slowpath - GSO segment length exceeds the egress MTU.
260 *
261 * This can happen in several cases:
262 * - Forwarding of a TCP GRO skb, when DF flag is not set.
263 * - Forwarding of an skb that arrived on a virtualization interface
264 * (virtio-net/vhost/tap) with TSO/GSO size set by other network
265 * stack.
266 * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
267 * interface with a smaller MTU.
268 * - Arriving GRO skb (or GSO skb in a virtualized environment) that is
269 * bridged to a NETIF_F_TSO tunnel stacked over an interface with an
270 * insufficient MTU.
271 */
272 features = netif_skb_features(skb);
273 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
274 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
275 if (IS_ERR_OR_NULL(segs)) {
276 kfree_skb(skb);
277 return -ENOMEM;
278 }
279
280 consume_skb(skb);
281
282 skb_list_walk_safe(segs, segs, nskb) {
283 int err;
284
285 skb_mark_not_on_list(segs);
286 err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
287
288 if (err && ret == 0)
289 ret = err;
290 }
291
292 return ret;
293 }
294
295 static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
296 {
297 unsigned int mtu;
298
299 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
300 /* Policy lookup after SNAT yielded a new policy */
301 if (skb_dst(skb)->xfrm) {
302 IPCB(skb)->flags |= IPSKB_REROUTED;
303 return dst_output(net, sk, skb);
304 }
305 #endif
306 mtu = ip_skb_dst_mtu(sk, skb);
307 if (skb_is_gso(skb))
308 return ip_finish_output_gso(net, sk, skb, mtu);
309
310 if (skb->len > mtu || IPCB(skb)->frag_max_size)
311 return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
312
313 return ip_finish_output2(net, sk, skb);
314 }
315
316 static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
317 {
318 int ret;
319
320 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
321 switch (ret) {
322 case NET_XMIT_SUCCESS:
323 return __ip_finish_output(net, sk, skb);
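/* NET_XMIT_CN: the BPF program signalled congestion but did not drop
 * the packet; transmit it anyway and report CN, unless transmission
 * itself failed (the GNU "?:" keeps a nonzero error in preference).
 */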
324 case NET_XMIT_CN:
325 return __ip_finish_output(net, sk, skb) ? : ret;
326 default:
327 kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
328 return ret;
329 }
330 }
331
332 static int ip_mc_finish_output(struct net *net, struct sock *sk,
333 struct sk_buff *skb)
334 {
335 struct rtable *new_rt;
336 bool do_cn = false;
337 int ret, err;
338
339 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
340 switch (ret) {
341 case NET_XMIT_CN:
342 do_cn = true;
343 fallthrough;
344 case NET_XMIT_SUCCESS:
345 break;
346 default:
347 kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
348 return ret;
349 }
350
351 /* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
352 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
353 * see ipv4_pktinfo_prepare().
354 */
355 new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
356 if (new_rt) {
357 new_rt->rt_iif = 0;
358 skb_dst_drop(skb);
359 skb_dst_set(skb, &new_rt->dst);
360 }
361
362 err = dev_loopback_xmit(net, sk, skb);
363 return (do_cn && err) ? ret : err;
364 }
365
366 int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
367 {
368 struct rtable *rt = skb_rtable(skb);
369 struct net_device *dev = rt->dst.dev;
370
371 /*
372 * If the indicated interface is up and running, send the packet.
373 */
374 skb->dev = dev;
375 skb->protocol = htons(ETH_P_IP);
376
377 /*
378 * Multicasts are looped back for other local users
379 */
380
381 if (rt->rt_flags&RTCF_MULTICAST) {
382 if (sk_mc_loop(sk)
383 #ifdef CONFIG_IP_MROUTE
384 /* Small optimization: do not loop back non-local frames,
385 which came back after forwarding; they will be dropped
386 by ip_mr_input in any case.
387 Note that local frames are looped back to be delivered
388 to local recipients.
389
390 This check is duplicated in ip_mr_input at the moment.
391 */
392 &&
393 ((rt->rt_flags & RTCF_LOCAL) ||
394 !(IPCB(skb)->flags & IPSKB_FORWARDED))
395 #endif
396 ) {
397 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
398 if (newskb)
399 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
400 net, sk, newskb, NULL, newskb->dev,
401 ip_mc_finish_output);
402 }
403
404 /* Multicasts with ttl 0 must not go beyond the host */
405
406 if (ip_hdr(skb)->ttl == 0) {
407 kfree_skb(skb);
408 return 0;
409 }
410 }
411
412 if (rt->rt_flags&RTCF_BROADCAST) {
413 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
414 if (newskb)
415 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
416 net, sk, newskb, NULL, newskb->dev,
417 ip_mc_finish_output);
418 }
419
420 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
421 net, sk, skb, NULL, skb->dev,
422 ip_finish_output,
423 !(IPCB(skb)->flags & IPSKB_REROUTED));
424 }
425
426 int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
427 {
428 struct net_device *dev, *indev = skb->dev;
429 int ret_val;
430
431 rcu_read_lock();
432 dev = skb_dst_dev_rcu(skb);
433 skb->dev = dev;
434 skb->protocol = htons(ETH_P_IP);
435
436 ret_val = NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
437 net, sk, skb, indev, dev,
438 ip_finish_output,
439 !(IPCB(skb)->flags & IPSKB_REROUTED));
440 rcu_read_unlock();
441 return ret_val;
442 }
443 EXPORT_SYMBOL(ip_output);
444
445 /*
446 * copy saddr and daddr, possibly using 64bit load/stores
447 * Equivalent to :
448 * iph->saddr = fl4->saddr;
449 * iph->daddr = fl4->daddr;
450 */
451 static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
452 {
453 BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
454 offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
455
456 iph->saddr = fl4->saddr;
457 iph->daddr = fl4->daddr;
458 }
459
460 /* Note: skb->sk can be different from sk, in case of tunnels */
461 int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
462 __u8 tos)
463 {
464 struct inet_sock *inet = inet_sk(sk);
465 struct net *net = sock_net(sk);
466 struct ip_options_rcu *inet_opt;
467 struct flowi4 *fl4;
468 struct rtable *rt;
469 struct iphdr *iph;
470 int res;
471
472 /* Skip all of this if the packet is already routed,
473 * e.g. by something like SCTP.
474 */
475 rcu_read_lock();
476 inet_opt = rcu_dereference(inet->inet_opt);
477 fl4 = &fl->u.ip4;
478 rt = skb_rtable(skb);
479 if (rt)
480 goto packet_routed;
481
482 /* Make sure we can route this packet. */
483 rt = dst_rtable(__sk_dst_check(sk, 0));
484 if (!rt) {
485 inet_sk_init_flowi4(inet, fl4);
486
487 /* sctp_v4_xmit() uses its own DSCP value */
488 fl4->flowi4_tos = tos & INET_DSCP_MASK;
489
490 /* If this fails, the transport layer's retransmit mechanism
491 * will keep trying until a route appears or the connection
492 * times out.
493 */
494 rt = ip_route_output_flow(net, fl4, sk);
495 if (IS_ERR(rt))
496 goto no_route;
497 sk_setup_caps(sk, &rt->dst);
498 }
499 skb_dst_set_noref(skb, &rt->dst);
500
501 packet_routed:
502 if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
503 goto no_route;
504
505 /* OK, we know where to send it, allocate and build IP header. */
506 skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
507 skb_reset_network_header(skb);
508 iph = ip_hdr(skb);
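/* Write version (4), ihl (5) and tos with a single 16-bit store:
 * 0x45 ends up in the first header byte, tos in the second.
 */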
509 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
510 if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
511 iph->frag_off = htons(IP_DF);
512 else
513 iph->frag_off = 0;
514 iph->ttl = ip_select_ttl(inet, &rt->dst);
515 iph->protocol = sk->sk_protocol;
516 ip_copy_addrs(iph, fl4);
517
518 /* The transport layer has set skb->h.foo itself. */
519
520 if (inet_opt && inet_opt->opt.optlen) {
521 iph->ihl += inet_opt->opt.optlen >> 2;
522 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt);
523 }
524
525 ip_select_ident_segs(net, skb, sk,
526 skb_shinfo(skb)->gso_segs ?: 1);
527
528 /* TODO : should we use skb->sk here instead of sk ? */
529 skb->priority = READ_ONCE(sk->sk_priority);
530 skb->mark = READ_ONCE(sk->sk_mark);
531
532 res = ip_local_out(net, sk, skb);
533 rcu_read_unlock();
534 return res;
535
536 no_route:
537 rcu_read_unlock();
538 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
539 kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES);
540 return -EHOSTUNREACH;
541 }
542 EXPORT_SYMBOL(__ip_queue_xmit);
543
544 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
545 {
546 return __ip_queue_xmit(sk, skb, fl, READ_ONCE(inet_sk(sk)->tos));
547 }
548 EXPORT_SYMBOL(ip_queue_xmit);
549
550 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
551 {
552 to->pkt_type = from->pkt_type;
553 to->priority = from->priority;
554 to->protocol = from->protocol;
555 to->skb_iif = from->skb_iif;
556 skb_dst_drop(to);
557 skb_dst_copy(to, from);
558 to->dev = from->dev;
559 to->mark = from->mark;
560
561 skb_copy_hash(to, from);
562
563 #ifdef CONFIG_NET_SCHED
564 to->tc_index = from->tc_index;
565 #endif
566 nf_copy(to, from);
567 skb_ext_copy(to, from);
568 #if IS_ENABLED(CONFIG_IP_VS)
569 to->ipvs_property = from->ipvs_property;
570 #endif
571 skb_copy_secmark(to, from);
572 }
573
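/* Editorial note: ip_fragment() is the DF-aware entry point. When the DF
 * bit is set and the skb may not ignore it (or a recorded frag_max_size
 * exceeds the mtu), the packet is dropped and an ICMP FRAG_NEEDED error is
 * sent back instead of fragmenting.
 */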
574 static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
575 unsigned int mtu,
576 int (*output)(struct net *, struct sock *, struct sk_buff *))
577 {
578 struct iphdr *iph = ip_hdr(skb);
579
580 if ((iph->frag_off & htons(IP_DF)) == 0)
581 return ip_do_fragment(net, sk, skb, output);
582
583 if (unlikely(!skb->ignore_df ||
584 (IPCB(skb)->frag_max_size &&
585 IPCB(skb)->frag_max_size > mtu))) {
586 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
587 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
588 htonl(mtu));
589 kfree_skb(skb);
590 return -EMSGSIZE;
591 }
592
593 return ip_do_fragment(net, sk, skb, output);
594 }
595
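/* Editorial note: the fraglist helpers below implement the fast path of
 * ip_do_fragment(): when the skb already carries suitably sized chunks in
 * skb_shinfo(skb)->frag_list, each chunk becomes a fragment in place by
 * prepending a copy of the IP header, avoiding allocation and copying.
 */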
596 void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
597 unsigned int hlen, struct ip_fraglist_iter *iter)
598 {
599 unsigned int first_len = skb_pagelen(skb);
600
601 iter->frag = skb_shinfo(skb)->frag_list;
602 skb_frag_list_init(skb);
603
604 iter->offset = 0;
605 iter->iph = iph;
606 iter->hlen = hlen;
607
608 skb->data_len = first_len - skb_headlen(skb);
609 skb->len = first_len;
610 iph->tot_len = htons(first_len);
611 iph->frag_off = htons(IP_MF);
612 ip_send_check(iph);
613 }
614 EXPORT_SYMBOL(ip_fraglist_init);
615
616 void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
617 {
618 unsigned int hlen = iter->hlen;
619 struct iphdr *iph = iter->iph;
620 struct sk_buff *frag;
621
622 frag = iter->frag;
623 frag->ip_summed = CHECKSUM_NONE;
624 skb_reset_transport_header(frag);
625 __skb_push(frag, hlen);
626 skb_reset_network_header(frag);
627 memcpy(skb_network_header(frag), iph, hlen);
628 iter->iph = ip_hdr(frag);
629 iph = iter->iph;
630 iph->tot_len = htons(frag->len);
631 ip_copy_metadata(frag, skb);
632 iter->offset += skb->len - hlen;
633 iph->frag_off = htons(iter->offset >> 3);
634 if (frag->next)
635 iph->frag_off |= htons(IP_MF);
636 /* Ready, complete checksum */
637 ip_send_check(iph);
638 }
639 EXPORT_SYMBOL(ip_fraglist_prepare);
640
641 void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
642 unsigned int ll_rs, unsigned int mtu, bool DF,
643 struct ip_frag_state *state)
644 {
645 struct iphdr *iph = ip_hdr(skb);
646
647 state->DF = DF;
648 state->hlen = hlen;
649 state->ll_rs = ll_rs;
650 state->mtu = mtu;
651
652 state->left = skb->len - hlen; /* Space per frame */
653 state->ptr = hlen; /* Where to start from */
654
655 state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
656 state->not_last_frag = iph->frag_off & htons(IP_MF);
657 }
658 EXPORT_SYMBOL(ip_frag_init);
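/* Worked example (illustrative): for an mtu of 1500 and a 20-byte header,
 * ip_do_fragment() passes 1480 bytes of data space here. Non-final
 * fragments are trimmed to a multiple of 8 (1480 already is one), so a
 * 4000-byte payload yields fragments carrying 1480, 1480 and 1040 bytes
 * at 8-byte-unit offsets 0, 185 and 370.
 */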
659
660 static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
661 bool first_frag)
662 {
663 /* Copy the flags to each fragment. */
664 IPCB(to)->flags = IPCB(from)->flags;
665
666 /* ANK: dirty, but effective trick. Upgrade options only if
667 * the segment to be fragmented was THE FIRST (otherwise,
668 * options are already fixed) and make it ONCE
669 * on the initial skb, so that all the following fragments
670 * will inherit fixed options.
671 */
672 if (first_frag)
673 ip_options_fragment(from);
674 }
675
676 struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
677 {
678 unsigned int len = state->left;
679 struct sk_buff *skb2;
680 struct iphdr *iph;
681
682 /* IF: it doesn't fit, use 'mtu' - the data space left */
683 if (len > state->mtu)
684 len = state->mtu;
685 /* IF: we are not sending up to and including the packet end
686 then align the next start on an eight byte boundary */
687 if (len < state->left) {
688 len &= ~7;
689 }
690
691 /* Allocate buffer */
692 skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
693 if (!skb2)
694 return ERR_PTR(-ENOMEM);
695
696 /*
697 * Set up data on packet
698 */
699
700 ip_copy_metadata(skb2, skb);
701 skb_reserve(skb2, state->ll_rs);
702 skb_put(skb2, len + state->hlen);
703 skb_reset_network_header(skb2);
704 skb2->transport_header = skb2->network_header + state->hlen;
705
706 /*
707 * Charge the memory for the fragment to any owner
708 * it might possess
709 */
710
711 if (skb->sk)
712 skb_set_owner_w(skb2, skb->sk);
713
714 /*
715 * Copy the packet header into the new buffer.
716 */
717
718 skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);
719
720 /*
721 * Copy a block of the IP datagram.
722 */
723 if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
724 BUG();
725 state->left -= len;
726
727 /*
728 * Fill in the new header fields.
729 */
730 iph = ip_hdr(skb2);
731 iph->frag_off = htons((state->offset >> 3));
732 if (state->DF)
733 iph->frag_off |= htons(IP_DF);
734
735 /*
736 * Added AC : If we are fragmenting a fragment that's not the
737 * last fragment then keep the MF bit set on each fragment
738 */
739 if (state->left > 0 || state->not_last_frag)
740 iph->frag_off |= htons(IP_MF);
741 state->ptr += len;
742 state->offset += len;
743
744 iph->tot_len = htons(len + state->hlen);
745
746 ip_send_check(iph);
747
748 return skb2;
749 }
750 EXPORT_SYMBOL(ip_frag_next);
751
752 /*
753 * This IP datagram is too large to be sent in one piece. Break it up into
754 * smaller pieces (each of size equal to the IP header plus a block of
755 * the data of the original IP data part) that will still fit in a
756 * single device frame, and queue such frames for sending.
757 */
758
759 int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
760 int (*output)(struct net *, struct sock *, struct sk_buff *))
761 {
762 struct iphdr *iph;
763 struct sk_buff *skb2;
764 u8 tstamp_type = skb->tstamp_type;
765 struct rtable *rt = skb_rtable(skb);
766 unsigned int mtu, hlen, ll_rs;
767 struct ip_fraglist_iter iter;
768 ktime_t tstamp = skb->tstamp;
769 struct ip_frag_state state;
770 int err = 0;
771
772 /* for offloaded checksums cleanup checksum before fragmentation */
773 if (skb->ip_summed == CHECKSUM_PARTIAL &&
774 (err = skb_checksum_help(skb)))
775 goto fail;
776
777 /*
778 * Point into the IP datagram header.
779 */
780
781 iph = ip_hdr(skb);
782
783 mtu = ip_skb_dst_mtu(sk, skb);
784 if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
785 mtu = IPCB(skb)->frag_max_size;
786
787 /*
788 * Setup starting values.
789 */
790
791 hlen = iph->ihl * 4;
792 mtu = mtu - hlen; /* Size of data space */
793 IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
794 ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
795
796 /* When frag_list is given, use it. First, check its validity:
797 * some transformers could create a wrong frag_list or break an existing
798 * one; this is not prohibited. In that case, fall back to copying.
799 *
800 * LATER: this step can be merged to real generation of fragments,
801 * we can switch to copy when see the first bad fragment.
802 */
803 if (skb_has_frag_list(skb)) {
804 struct sk_buff *frag, *frag2;
805 unsigned int first_len = skb_pagelen(skb);
806
807 if (first_len - hlen > mtu ||
808 ((first_len - hlen) & 7) ||
809 ip_is_fragment(iph) ||
810 skb_cloned(skb) ||
811 skb_headroom(skb) < ll_rs)
812 goto slow_path;
813
814 skb_walk_frags(skb, frag) {
815 /* Correct geometry. */
816 if (frag->len > mtu ||
817 ((frag->len & 7) && frag->next) ||
818 skb_headroom(frag) < hlen + ll_rs)
819 goto slow_path_clean;
820
821 /* Partially cloned skb? */
822 if (skb_shared(frag))
823 goto slow_path_clean;
824
825 BUG_ON(frag->sk);
826 if (skb->sk) {
827 frag->sk = skb->sk;
828 frag->destructor = sock_wfree;
829 }
830 skb->truesize -= frag->truesize;
831 }
832
833 /* Everything is OK. Generate! */
834 ip_fraglist_init(skb, iph, hlen, &iter);
835
836 for (;;) {
837 /* Prepare the header of the next frame,
838 * before the previous one has gone down. */
839 if (iter.frag) {
840 bool first_frag = (iter.offset == 0);
841
842 IPCB(iter.frag)->flags = IPCB(skb)->flags;
843 ip_fraglist_prepare(skb, &iter);
844 if (first_frag && IPCB(skb)->opt.optlen) {
845 /* ipcb->opt is not populated for frags
846 * coming from __ip_make_skb(),
847 * ip_options_fragment() needs optlen
848 */
849 IPCB(iter.frag)->opt.optlen =
850 IPCB(skb)->opt.optlen;
851 ip_options_fragment(iter.frag);
852 ip_send_check(iter.iph);
853 }
854 }
855
856 skb_set_delivery_time(skb, tstamp, tstamp_type);
857 err = output(net, sk, skb);
858
859 if (!err)
860 IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
861 if (err || !iter.frag)
862 break;
863
864 skb = ip_fraglist_next(&iter);
865 }
866
867 if (err == 0) {
868 IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
869 return 0;
870 }
871
872 kfree_skb_list(iter.frag);
873
874 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
875 return err;
876
877 slow_path_clean:
878 skb_walk_frags(skb, frag2) {
879 if (frag2 == frag)
880 break;
881 frag2->sk = NULL;
882 frag2->destructor = NULL;
883 skb->truesize += frag2->truesize;
884 }
885 }
886
887 slow_path:
888 /*
889 * Fragment the datagram.
890 */
891
892 ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
893 &state);
894
895 /*
896 * Keep copying data until we run out.
897 */
898
899 while (state.left > 0) {
900 bool first_frag = (state.offset == 0);
901
902 skb2 = ip_frag_next(skb, &state);
903 if (IS_ERR(skb2)) {
904 err = PTR_ERR(skb2);
905 goto fail;
906 }
907 ip_frag_ipcb(skb, skb2, first_frag);
908
909 /*
910 * Put this fragment into the sending queue.
911 */
912 skb_set_delivery_time(skb2, tstamp, tstamp_type);
913 err = output(net, sk, skb2);
914 if (err)
915 goto fail;
916
917 IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
918 }
919 consume_skb(skb);
920 IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
921 return err;
922
923 fail:
924 kfree_skb(skb);
925 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
926 return err;
927 }
928 EXPORT_SYMBOL(ip_do_fragment);
929
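/* getfrag() contract (editorial summary): copy 'len' bytes of payload,
 * starting at 'offset' within the source, into 'to'. 'odd' is the byte
 * offset of 'to' within the skb, so a checksum can be folded in correctly
 * when the device will not checksum for us (ip_summed != CHECKSUM_PARTIAL).
 */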
930 int
931 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
932 {
933 struct msghdr *msg = from;
934
935 if (skb->ip_summed == CHECKSUM_PARTIAL) {
936 if (!copy_from_iter_full(to, len, &msg->msg_iter))
937 return -EFAULT;
938 } else {
939 __wsum csum = 0;
940 if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
941 return -EFAULT;
942 skb->csum = csum_block_add(skb->csum, csum, odd);
943 }
944 return 0;
945 }
946 EXPORT_SYMBOL(ip_generic_getfrag);
947
948 static int __ip_append_data(struct sock *sk,
949 struct flowi4 *fl4,
950 struct sk_buff_head *queue,
951 struct inet_cork *cork,
952 struct page_frag *pfrag,
953 int getfrag(void *from, char *to, int offset,
954 int len, int odd, struct sk_buff *skb),
955 void *from, int length, int transhdrlen,
956 unsigned int flags)
957 {
958 struct inet_sock *inet = inet_sk(sk);
959 struct ubuf_info *uarg = NULL;
960 struct sk_buff *skb;
961 struct ip_options *opt = cork->opt;
962 int hh_len;
963 int exthdrlen;
964 int mtu;
965 int copy;
966 int err;
967 int offset = 0;
968 bool zc = false;
969 unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
970 int csummode = CHECKSUM_NONE;
971 struct rtable *rt = dst_rtable(cork->dst);
972 bool paged, hold_tskey = false, extra_uref = false;
973 unsigned int wmem_alloc_delta = 0;
974 u32 tskey = 0;
975
976 skb = skb_peek_tail(queue);
977
978 exthdrlen = !skb ? rt->dst.header_len : 0;
979 mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
980 paged = !!cork->gso_size;
981
982 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
983
984 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
985 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
986 maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;
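/* Illustrative arithmetic: with mtu = 1500 and no IP options,
 * fragheaderlen = 20 and maxfraglen = (1480 & ~7) + 20 = 1500; with
 * 12 bytes of options, fragheaderlen = 32 and
 * maxfraglen = (1468 & ~7) + 32 = 1496, keeping each fragment's data
 * length a multiple of 8.
 */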
987
988 if (cork->length + length > maxnonfragsize - fragheaderlen) {
989 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
990 mtu - (opt ? opt->optlen : 0));
991 return -EMSGSIZE;
992 }
993
994 /*
995 * transhdrlen > 0 means that this is the first fragment and we want
996 * it not to be fragmented in the future.
997 */
998 if (transhdrlen &&
999 length + fragheaderlen <= mtu &&
1000 rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
1001 (!(flags & MSG_MORE) || cork->gso_size) &&
1002 (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
1003 csummode = CHECKSUM_PARTIAL;
1004
1005 if ((flags & MSG_ZEROCOPY) && length) {
1006 struct msghdr *msg = from;
1007
1008 if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
1009 if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
1010 return -EINVAL;
1011
1012 /* Leave uarg NULL if we can't do zerocopy; callers should
1013 * be able to handle it.
1014 */
1015 if ((rt->dst.dev->features & NETIF_F_SG) &&
1016 csummode == CHECKSUM_PARTIAL) {
1017 paged = true;
1018 zc = true;
1019 uarg = msg->msg_ubuf;
1020 }
1021 } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
1022 uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb),
1023 false);
1024 if (!uarg)
1025 return -ENOBUFS;
1026 extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
1027 if (rt->dst.dev->features & NETIF_F_SG &&
1028 csummode == CHECKSUM_PARTIAL) {
1029 paged = true;
1030 zc = true;
1031 } else {
1032 uarg_to_msgzc(uarg)->zerocopy = 0;
1033 skb_zcopy_set(skb, uarg, &extra_uref);
1034 }
1035 }
1036 } else if ((flags & MSG_SPLICE_PAGES) && length) {
1037 if (inet_test_bit(HDRINCL, sk))
1038 return -EPERM;
1039 if (rt->dst.dev->features & NETIF_F_SG &&
1040 getfrag == ip_generic_getfrag)
1041 /* We need an empty buffer to attach stuff to */
1042 paged = true;
1043 else
1044 flags &= ~MSG_SPLICE_PAGES;
1045 }
1046
1047 cork->length += length;
1048
1049 if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
1050 READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
1051 if (cork->flags & IPCORK_TS_OPT_ID) {
1052 tskey = cork->ts_opt_id;
1053 } else {
1054 tskey = atomic_inc_return(&sk->sk_tskey) - 1;
1055 hold_tskey = true;
1056 }
1057 }
1058
1059 /* So, what's going on in the loop below?
1060 *
1061 * We use the calculated fragment length to generate a chained skb;
1062 * each of its segments is an IP fragment ready for sending to the
1063 * network after adding the appropriate IP header.
1064 */
1065
1066 if (!skb)
1067 goto alloc_new_skb;
1068
1069 while (length > 0) {
1070 /* Check if the remaining data fits into current packet. */
1071 copy = mtu - skb->len;
1072 if (copy < length)
1073 copy = maxfraglen - skb->len;
1074 if (copy <= 0) {
1075 char *data;
1076 unsigned int datalen;
1077 unsigned int fraglen;
1078 unsigned int fraggap;
1079 unsigned int alloclen, alloc_extra;
1080 unsigned int pagedlen;
1081 struct sk_buff *skb_prev;
1082 alloc_new_skb:
1083 skb_prev = skb;
1084 if (skb_prev)
1085 fraggap = skb_prev->len - maxfraglen;
1086 else
1087 fraggap = 0;
1088
1089 /*
1090 * If remaining data exceeds the mtu,
1091 * we know we need more fragment(s).
1092 */
1093 datalen = length + fraggap;
1094 if (datalen > mtu - fragheaderlen)
1095 datalen = maxfraglen - fragheaderlen;
1096 fraglen = datalen + fragheaderlen;
1097 pagedlen = 0;
1098
1099 alloc_extra = hh_len + 15;
1100 alloc_extra += exthdrlen;
1101
1102 /* The last fragment gets additional space at tail.
1103 * Note, with MSG_MORE we overallocate on fragments,
1104 * because we have no idea what fragment will be
1105 * the last.
1106 */
1107 if (datalen == length + fraggap)
1108 alloc_extra += rt->dst.trailer_len;
1109
1110 if ((flags & MSG_MORE) &&
1111 !(rt->dst.dev->features&NETIF_F_SG))
1112 alloclen = mtu;
1113 else if (!paged &&
1114 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
1115 !(rt->dst.dev->features & NETIF_F_SG)))
1116 alloclen = fraglen;
1117 else {
1118 alloclen = fragheaderlen + transhdrlen;
1119 pagedlen = datalen - transhdrlen;
1120 }
1121
1122 alloclen += alloc_extra;
1123
1124 if (transhdrlen) {
1125 skb = sock_alloc_send_skb(sk, alloclen,
1126 (flags & MSG_DONTWAIT), &err);
1127 } else {
1128 skb = NULL;
1129 if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
1130 2 * sk->sk_sndbuf)
1131 skb = alloc_skb(alloclen,
1132 sk->sk_allocation);
1133 if (unlikely(!skb))
1134 err = -ENOBUFS;
1135 }
1136 if (!skb)
1137 goto error;
1138
1139 /*
1140 * Fill in the control structures
1141 */
1142 skb->ip_summed = csummode;
1143 skb->csum = 0;
1144 skb_reserve(skb, hh_len);
1145
1146 /*
1147 * Find where to start putting bytes.
1148 */
1149 data = skb_put(skb, fraglen + exthdrlen - pagedlen);
1150 skb_set_network_header(skb, exthdrlen);
1151 skb->transport_header = (skb->network_header +
1152 fragheaderlen);
1153 data += fragheaderlen + exthdrlen;
1154
1155 if (fraggap) {
1156 skb->csum = skb_copy_and_csum_bits(
1157 skb_prev, maxfraglen,
1158 data + transhdrlen, fraggap);
1159 skb_prev->csum = csum_sub(skb_prev->csum,
1160 skb->csum);
1161 data += fraggap;
1162 pskb_trim_unique(skb_prev, maxfraglen);
1163 }
1164
1165 copy = datalen - transhdrlen - fraggap - pagedlen;
1166 /* [!] NOTE: copy will be negative if pagedlen>0
1167 * because then the equation reduces to -fraggap.
1168 */
1169 if (copy > 0 &&
1170 INDIRECT_CALL_1(getfrag, ip_generic_getfrag,
1171 from, data + transhdrlen, offset,
1172 copy, fraggap, skb) < 0) {
1173 err = -EFAULT;
1174 kfree_skb(skb);
1175 goto error;
1176 } else if (flags & MSG_SPLICE_PAGES) {
1177 copy = 0;
1178 }
1179
1180 offset += copy;
1181 length -= copy + transhdrlen;
1182 transhdrlen = 0;
1183 exthdrlen = 0;
1184 csummode = CHECKSUM_NONE;
1185
1186 /* only the initial fragment is time stamped */
1187 skb_shinfo(skb)->tx_flags = cork->tx_flags;
1188 cork->tx_flags = 0;
1189 skb_shinfo(skb)->tskey = tskey;
1190 tskey = 0;
1191 skb_zcopy_set(skb, uarg, &extra_uref);
1192
1193 if ((flags & MSG_CONFIRM) && !skb_prev)
1194 skb_set_dst_pending_confirm(skb, 1);
1195
1196 /*
1197 * Put the packet on the pending queue.
1198 */
1199 if (!skb->destructor) {
1200 skb->destructor = sock_wfree;
1201 skb->sk = sk;
1202 wmem_alloc_delta += skb->truesize;
1203 }
1204 __skb_queue_tail(queue, skb);
1205 continue;
1206 }
1207
1208 if (copy > length)
1209 copy = length;
1210
1211 if (!(rt->dst.dev->features&NETIF_F_SG) &&
1212 skb_tailroom(skb) >= copy) {
1213 unsigned int off;
1214
1215 off = skb->len;
1216 if (INDIRECT_CALL_1(getfrag, ip_generic_getfrag,
1217 from, skb_put(skb, copy),
1218 offset, copy, off, skb) < 0) {
1219 __skb_trim(skb, off);
1220 err = -EFAULT;
1221 goto error;
1222 }
1223 } else if (flags & MSG_SPLICE_PAGES) {
1224 struct msghdr *msg = from;
1225
1226 err = -EIO;
1227 if (WARN_ON_ONCE(copy > msg->msg_iter.count))
1228 goto error;
1229
1230 err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
1231 if (err < 0)
1232 goto error;
1233 copy = err;
1234 wmem_alloc_delta += copy;
1235 } else if (!zc) {
1236 int i = skb_shinfo(skb)->nr_frags;
1237
1238 err = -ENOMEM;
1239 if (!sk_page_frag_refill(sk, pfrag))
1240 goto error;
1241
1242 skb_zcopy_downgrade_managed(skb);
1243 if (!skb_can_coalesce(skb, i, pfrag->page,
1244 pfrag->offset)) {
1245 err = -EMSGSIZE;
1246 if (i == MAX_SKB_FRAGS)
1247 goto error;
1248
1249 __skb_fill_page_desc(skb, i, pfrag->page,
1250 pfrag->offset, 0);
1251 skb_shinfo(skb)->nr_frags = ++i;
1252 get_page(pfrag->page);
1253 }
1254 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1255 if (INDIRECT_CALL_1(getfrag, ip_generic_getfrag,
1256 from,
1257 page_address(pfrag->page) + pfrag->offset,
1258 offset, copy, skb->len, skb) < 0)
1259 goto error_efault;
1260
1261 pfrag->offset += copy;
1262 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1263 skb_len_add(skb, copy);
1264 wmem_alloc_delta += copy;
1265 } else {
1266 err = skb_zerocopy_iter_dgram(skb, from, copy);
1267 if (err < 0)
1268 goto error;
1269 }
1270 offset += copy;
1271 length -= copy;
1272 }
1273
1274 if (wmem_alloc_delta)
1275 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1276 return 0;
1277
1278 error_efault:
1279 err = -EFAULT;
1280 error:
1281 net_zcopy_put_abort(uarg, extra_uref);
1282 cork->length -= length;
1283 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1284 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1285 if (hold_tskey)
1286 atomic_dec(&sk->sk_tskey);
1287 return err;
1288 }
1289
1290 static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1291 struct ipcm_cookie *ipc, struct rtable **rtp)
1292 {
1293 struct ip_options_rcu *opt;
1294 struct rtable *rt;
1295
1296 rt = *rtp;
1297 if (unlikely(!rt))
1298 return -EFAULT;
1299
1300 cork->fragsize = ip_sk_use_pmtu(sk) ?
1301 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
1302
1303 if (!inetdev_valid_mtu(cork->fragsize))
1304 return -ENETUNREACH;
1305
1306 /*
1307 * setup for corking.
1308 */
1309 opt = ipc->opt;
1310 if (opt) {
1311 if (!cork->opt) {
1312 cork->opt = kmalloc(sizeof(struct ip_options) + 40,
1313 sk->sk_allocation);
1314 if (unlikely(!cork->opt))
1315 return -ENOBUFS;
1316 }
1317 memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
1318 cork->flags |= IPCORK_OPT;
1319 cork->addr = ipc->addr;
1320 }
1321
1322 cork->gso_size = ipc->gso_size;
1323
1324 cork->dst = &rt->dst;
1325 /* We stole this route, caller should not release it. */
1326 *rtp = NULL;
1327
1328 cork->length = 0;
1329 cork->ttl = ipc->ttl;
1330 cork->tos = ipc->tos;
1331 cork->mark = ipc->sockc.mark;
1332 cork->priority = ipc->sockc.priority;
1333 cork->transmit_time = ipc->sockc.transmit_time;
1334 cork->tx_flags = 0;
1335 sock_tx_timestamp(sk, &ipc->sockc, &cork->tx_flags);
1336 if (ipc->sockc.tsflags & SOCKCM_FLAG_TS_OPT_ID) {
1337 cork->flags |= IPCORK_TS_OPT_ID;
1338 cork->ts_opt_id = ipc->sockc.ts_opt_id;
1339 }
1340
1341 return 0;
1342 }
1343
1344 /*
1345 * ip_append_data() can make one large IP datagram from many pieces of
1346 * data. Each piece will be held on the socket until
1347 * ip_push_pending_frames() is called. Each piece can be a page or
1348 * non-page data.
1349 *
1350 * Not only UDP but other transport protocols - e.g. raw sockets -
1351 * can potentially use this interface.
1352 *
1353 * LATER: length must be adjusted by pad at tail, when it is required.
1354 */
1355 int ip_append_data(struct sock *sk, struct flowi4 *fl4,
1356 int getfrag(void *from, char *to, int offset, int len,
1357 int odd, struct sk_buff *skb),
1358 void *from, int length, int transhdrlen,
1359 struct ipcm_cookie *ipc, struct rtable **rtp,
1360 unsigned int flags)
1361 {
1362 struct inet_sock *inet = inet_sk(sk);
1363 int err;
1364
1365 if (flags&MSG_PROBE)
1366 return 0;
1367
1368 if (skb_queue_empty(&sk->sk_write_queue)) {
1369 err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
1370 if (err)
1371 return err;
1372 } else {
1373 transhdrlen = 0;
1374 }
1375
1376 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
1377 sk_page_frag(sk), getfrag,
1378 from, length, transhdrlen, flags);
1379 }
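/* Illustrative usage sketch (an assumption, not taken from this file):
 * a UDP-style sender corks, appends and pushes in three steps:
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg, len,
 *			     sizeof(struct udphdr), &ipc, &rt, msg->msg_flags);
 *	if (!err)
 *		err = ip_push_pending_frames(sk, &fl4);
 *	release_sock(sk);
 */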
1380
1381 static void ip_cork_release(struct inet_cork *cork)
1382 {
1383 cork->flags &= ~IPCORK_OPT;
1384 kfree(cork->opt);
1385 cork->opt = NULL;
1386 dst_release(cork->dst);
1387 cork->dst = NULL;
1388 }
1389
1390 /*
1391 * Combine all pending IP fragments on the socket into one IP datagram
1392 * and push it out.
1393 */
1394 struct sk_buff *__ip_make_skb(struct sock *sk,
1395 struct flowi4 *fl4,
1396 struct sk_buff_head *queue,
1397 struct inet_cork *cork)
1398 {
1399 struct sk_buff *skb, *tmp_skb;
1400 struct sk_buff **tail_skb;
1401 struct inet_sock *inet = inet_sk(sk);
1402 struct net *net = sock_net(sk);
1403 struct ip_options *opt = NULL;
1404 struct rtable *rt = dst_rtable(cork->dst);
1405 struct iphdr *iph;
1406 u8 pmtudisc, ttl;
1407 __be16 df = 0;
1408
1409 skb = __skb_dequeue(queue);
1410 if (!skb)
1411 goto out;
1412 tail_skb = &(skb_shinfo(skb)->frag_list);
1413
1414 /* move skb->data to ip header from ext header */
1415 if (skb->data < skb_network_header(skb))
1416 __skb_pull(skb, skb_network_offset(skb));
1417 while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1418 __skb_pull(tmp_skb, skb_network_header_len(skb));
1419 *tail_skb = tmp_skb;
1420 tail_skb = &(tmp_skb->next);
1421 skb->len += tmp_skb->len;
1422 skb->data_len += tmp_skb->len;
1423 skb->truesize += tmp_skb->truesize;
1424 tmp_skb->destructor = NULL;
1425 tmp_skb->sk = NULL;
1426 }
1427
1428 /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
1429 * allow fragmenting the frame generated here. No matter how transforms
1430 * change the size of the packet, it will come out.
1431 */
1432 skb->ignore_df = ip_sk_ignore_df(sk);
1433
1434 /* DF bit is set when we want to see DF on outgoing frames.
1435 * If ignore_df is set too, we still allow to fragment this frame
1436 * locally. */
1437 pmtudisc = READ_ONCE(inet->pmtudisc);
1438 if (pmtudisc == IP_PMTUDISC_DO ||
1439 pmtudisc == IP_PMTUDISC_PROBE ||
1440 (skb->len <= dst_mtu(&rt->dst) &&
1441 ip_dont_fragment(sk, &rt->dst)))
1442 df = htons(IP_DF);
1443
1444 if (cork->flags & IPCORK_OPT)
1445 opt = cork->opt;
1446
1447 if (cork->ttl != 0)
1448 ttl = cork->ttl;
1449 else if (rt->rt_type == RTN_MULTICAST)
1450 ttl = READ_ONCE(inet->mc_ttl);
1451 else
1452 ttl = ip_select_ttl(inet, &rt->dst);
1453
1454 iph = ip_hdr(skb);
1455 iph->version = 4;
1456 iph->ihl = 5;
1457 iph->tos = (cork->tos != -1) ? cork->tos : READ_ONCE(inet->tos);
1458 iph->frag_off = df;
1459 iph->ttl = ttl;
1460 iph->protocol = sk->sk_protocol;
1461 ip_copy_addrs(iph, fl4);
1462 ip_select_ident(net, skb, sk);
1463
1464 if (opt) {
1465 iph->ihl += opt->optlen >> 2;
1466 ip_options_build(skb, opt, cork->addr, rt);
1467 }
1468
1469 skb->priority = cork->priority;
1470 skb->mark = cork->mark;
1471 if (sk_is_tcp(sk))
1472 skb_set_delivery_time(skb, cork->transmit_time, SKB_CLOCK_MONOTONIC);
1473 else
1474 skb_set_delivery_type_by_clockid(skb, cork->transmit_time, sk->sk_clockid);
1475 /*
1476 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1477 * on dst refcount
1478 */
1479 cork->dst = NULL;
1480 skb_dst_set(skb, &rt->dst);
1481
1482 if (iph->protocol == IPPROTO_ICMP) {
1483 u8 icmp_type;
1484
1485 /* For such sockets, transhdrlen is zero when ip_append_data() is done,
1486 * so the icmphdr is not in the skb linear region and icmp_type cannot
1487 * be read via icmp_hdr(skb)->type.
1488 */
1489 if (sk->sk_type == SOCK_RAW &&
1490 !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
1491 icmp_type = fl4->fl4_icmp_type;
1492 else
1493 icmp_type = icmp_hdr(skb)->type;
1494 icmp_out_count(net, icmp_type);
1495 }
1496
1497 ip_cork_release(cork);
1498 out:
1499 return skb;
1500 }
1501
1502 int ip_send_skb(struct net *net, struct sk_buff *skb)
1503 {
1504 int err;
1505
1506 err = ip_local_out(net, skb->sk, skb);
1507 if (err) {
1508 if (err > 0)
1509 err = net_xmit_errno(err);
1510 if (err)
1511 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1512 }
1513
1514 return err;
1515 }
1516
1517 int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
1518 {
1519 struct sk_buff *skb;
1520
1521 skb = ip_finish_skb(sk, fl4);
1522 if (!skb)
1523 return 0;
1524
1525 /* Netfilter gets the whole, not yet fragmented skb. */
1526 return ip_send_skb(sock_net(sk), skb);
1527 }
1528
1529 /*
1530 * Throw away all pending data on the socket.
1531 */
1532 static void __ip_flush_pending_frames(struct sock *sk,
1533 struct sk_buff_head *queue,
1534 struct inet_cork *cork)
1535 {
1536 struct sk_buff *skb;
1537
1538 while ((skb = __skb_dequeue_tail(queue)) != NULL)
1539 kfree_skb(skb);
1540
1541 ip_cork_release(cork);
1542 }
1543
1544 void ip_flush_pending_frames(struct sock *sk)
1545 {
1546 __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
1547 }
1548
1549 struct sk_buff *ip_make_skb(struct sock *sk,
1550 struct flowi4 *fl4,
1551 int getfrag(void *from, char *to, int offset,
1552 int len, int odd, struct sk_buff *skb),
1553 void *from, int length, int transhdrlen,
1554 struct ipcm_cookie *ipc, struct rtable **rtp,
1555 struct inet_cork *cork, unsigned int flags)
1556 {
1557 struct sk_buff_head queue;
1558 int err;
1559
1560 if (flags & MSG_PROBE)
1561 return NULL;
1562
1563 __skb_queue_head_init(&queue);
1564
1565 cork->flags = 0;
1566 cork->addr = 0;
1567 cork->opt = NULL;
1568 err = ip_setup_cork(sk, cork, ipc, rtp);
1569 if (err)
1570 return ERR_PTR(err);
1571
1572 err = __ip_append_data(sk, fl4, &queue, cork,
1573 &current->task_frag, getfrag,
1574 from, length, transhdrlen, flags);
1575 if (err) {
1576 __ip_flush_pending_frames(sk, &queue, cork);
1577 return ERR_PTR(err);
1578 }
1579
1580 return __ip_make_skb(sk, fl4, &queue, cork);
1581 }
1582
1583 /*
1584 * Fetch data from kernel space and fill in checksum if needed.
1585 */
1586 static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1587 int len, int odd, struct sk_buff *skb)
1588 {
1589 __wsum csum;
1590
1591 csum = csum_partial_copy_nocheck(dptr+offset, to, len);
1592 skb->csum = csum_block_add(skb->csum, csum, odd);
1593 return 0;
1594 }
1595
1596 /*
1597 * Generic function to send a packet as reply to another packet.
1598 * Used to send some TCP resets/acks so far.
1599 */
1600 void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk,
1601 struct sk_buff *skb,
1602 const struct ip_options *sopt,
1603 __be32 daddr, __be32 saddr,
1604 const struct ip_reply_arg *arg,
1605 unsigned int len, u64 transmit_time, u32 txhash)
1606 {
1607 struct ip_options_data replyopts;
1608 struct ipcm_cookie ipc;
1609 struct flowi4 fl4;
1610 struct rtable *rt = skb_rtable(skb);
1611 struct net *net = sock_net(sk);
1612 struct sk_buff *nskb;
1613 int err;
1614 int oif;
1615
1616 if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
1617 return;
1618
1619 ipcm_init(&ipc);
1620 ipc.addr = daddr;
1621 ipc.sockc.transmit_time = transmit_time;
1622
1623 if (replyopts.opt.opt.optlen) {
1624 ipc.opt = &replyopts.opt;
1625
1626 if (replyopts.opt.opt.srr)
1627 daddr = replyopts.opt.opt.faddr;
1628 }
1629
1630 oif = arg->bound_dev_if;
1631 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
1632 oif = skb->skb_iif;
1633
1634 flowi4_init_output(&fl4, oif,
1635 IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
1636 arg->tos & INET_DSCP_MASK,
1637 RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
1638 ip_reply_arg_flowi_flags(arg),
1639 daddr, saddr,
1640 tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1641 arg->uid);
1642 security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
1643 rt = ip_route_output_flow(net, &fl4, sk);
1644 if (IS_ERR(rt))
1645 return;
1646
1647 inet_sk(sk)->tos = arg->tos;
1648
1649 sk->sk_protocol = ip_hdr(skb)->protocol;
1650 sk->sk_bound_dev_if = arg->bound_dev_if;
1651 sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
1652 ipc.sockc.mark = fl4.flowi4_mark;
1653 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1654 len, 0, &ipc, &rt, MSG_DONTWAIT);
1655 if (unlikely(err)) {
1656 ip_flush_pending_frames(sk);
1657 goto out;
1658 }
1659
1660 nskb = skb_peek(&sk->sk_write_queue);
1661 if (nskb) {
1662 if (arg->csumoffset >= 0)
1663 *((__sum16 *)skb_transport_header(nskb) +
1664 arg->csumoffset) = csum_fold(csum_add(nskb->csum,
1665 arg->csum));
1666 nskb->ip_summed = CHECKSUM_NONE;
1667 if (orig_sk)
1668 skb_set_owner_edemux(nskb, (struct sock *)orig_sk);
1669 if (transmit_time)
1670 nskb->tstamp_type = SKB_CLOCK_MONOTONIC;
1671 if (txhash)
1672 skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
1673 ip_push_pending_frames(sk, &fl4);
1674 }
1675 out:
1676 ip_rt_put(rt);
1677 }
1678
1679 void __init ip_init(void)
1680 {
1681 ip_rt_init();
1682 inet_initpeers();
1683
1684 #if defined(CONFIG_IP_MULTICAST)
1685 igmp_mc_init();
1686 #endif
1687 }
1688