xref: /linux/net/ipv4/ip_input.c (revision 3c4fc7bf4c9e66fe71abcbf93f62f4ddb89b7f15)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		The Internet Protocol (IP) module.
8  *
9  * Authors:	Ross Biro
10  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11  *		Donald Becker, <becker@super.org>
12  *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
13  *		Richard Underwood
14  *		Stefan Becker, <stefanb@yello.ping.de>
15  *		Jorge Cwik, <jorge@laser.satlink.net>
16  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
17  *
18  * Fixes:
19  *		Alan Cox	:	Commented a couple of minor bits of surplus code
20  *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
21  *					(just stops a compiler warning).
22  *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
23  *					are junked rather than corrupting things.
24  *		Alan Cox	:	Frames to bad broadcast subnets are dumped
25  *					We used to process them non broadcast and
26  *					boy could that cause havoc.
27  *		Alan Cox	:	ip_forward sets the free flag on the
28  *					new frame it queues. Still crap because
29  *					it copies the frame but at least it
30  *					doesn't eat memory too.
31  *		Alan Cox	:	Generic queue code and memory fixes.
32  *		Fred Van Kempen :	IP fragment support (borrowed from NET2E)
33  *		Gerhard Koerting:	Forward fragmented frames correctly.
34  *		Gerhard Koerting: 	Fixes to my fix of the above 8-).
35  *		Gerhard Koerting:	IP interface addressing fix.
36  *		Linus Torvalds	:	More robustness checks
37  *		Alan Cox	:	Even more checks: Still not as robust as it ought to be
38  *		Alan Cox	:	Save IP header pointer for later
39  *		Alan Cox	:	ip option setting
40  *		Alan Cox	:	Use ip_tos/ip_ttl settings
41  *		Alan Cox	:	Fragmentation bogosity removed
42  *					(Thanks to Mark.Bush@prg.ox.ac.uk)
43  *		Dmitry Gorodchanin :	Send of a raw packet crash fix.
44  *		Alan Cox	:	Silly ip bug when an overlength
45  *					fragment turns up. Now frees the
46  *					queue.
47  *		Linus Torvalds/ :	Memory leakage on fragmentation
48  *		Alan Cox	:	handling.
49  *		Gerhard Koerting:	Forwarding uses IP priority hints
50  *		Teemu Rantanen	:	Fragment problems.
51  *		Alan Cox	:	General cleanup, comments and reformat
52  *		Alan Cox	:	SNMP statistics
53  *		Alan Cox	:	BSD address rule semantics. Also see
54  *					UDP as there is a nasty checksum issue
55  *					if you do things the wrong way.
56  *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file
57  *		Alan Cox	: 	IP options adjust sk->priority.
58  *		Pedro Roque	:	Fix mtu/length error in ip_forward.
59  *		Alan Cox	:	Avoid ip_chk_addr when possible.
60  *	Richard Underwood	:	IP multicasting.
61  *		Alan Cox	:	Cleaned up multicast handlers.
62  *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
63  *		Gunther Mayer	:	Fix the SNMP reporting typo
64  *		Alan Cox	:	Always in group 224.0.0.1
65  *	Pauline Middelink	:	Fast ip_checksum update when forwarding
66  *					Masquerading support.
67  *		Alan Cox	:	Multicast loopback error for 224.0.0.1
68  *		Alan Cox	:	IP_MULTICAST_LOOP option.
69  *		Alan Cox	:	Use notifiers.
70  *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too)
71  *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!)
72  *		Stefan Becker   :       Send out ICMP HOST REDIRECT
73  *	Arnt Gulbrandsen	:	ip_build_xmit
74  *		Alan Cox	:	Per socket routing cache
75  *		Alan Cox	:	Fixed routing cache, added header cache.
76  *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
77  *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
78  *		Alan Cox	:	Incoming IP option handling.
79  *		Alan Cox	:	Set saddr on raw output frames as per BSD.
80  *		Alan Cox	:	Stopped broadcast source route explosions.
81  *		Alan Cox	:	Can disable source routing
82  *		Takeshi Sone    :	Masquerading didn't work.
83  *	Dave Bonn,Alan Cox	:	Faster IP forwarding whenever possible.
84  *		Alan Cox	:	Memory leaks, tramples, misc debugging.
85  *		Alan Cox	:	Fixed multicast (by popular demand 8))
86  *		Alan Cox	:	Fixed forwarding (by even more popular demand 8))
87  *		Alan Cox	:	Fixed SNMP statistics [I think]
88  *	Gerhard Koerting	:	IP fragmentation forwarding fix
89  *		Alan Cox	:	Device lock against page fault.
90  *		Alan Cox	:	IP_HDRINCL facility.
91  *	Werner Almesberger	:	Zero fragment bug
92  *		Alan Cox	:	RAW IP frame length bug
93  *		Alan Cox	:	Outgoing firewall on build_xmit
94  *		A.N.Kuznetsov	:	IP_OPTIONS support throughout the kernel
95  *		Alan Cox	:	Multicast routing hooks
96  *		Jos Vos		:	Do accounting *before* call_in_firewall
97  *	Willy Konynenberg	:	Transparent proxying support
98  *
99  * To Fix:
100  *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
101  *		and could be made very efficient with the addition of some virtual memory hacks to permit
102  *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
103  *		Output fragmentation wants updating along with the buffer management to use a single
104  *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
105  *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
106  *		fragmentation anyway.
107  */
108 
109 #define pr_fmt(fmt) "IPv4: " fmt
110 
111 #include <linux/module.h>
112 #include <linux/types.h>
113 #include <linux/kernel.h>
114 #include <linux/string.h>
115 #include <linux/errno.h>
116 #include <linux/slab.h>
117 
118 #include <linux/net.h>
119 #include <linux/socket.h>
120 #include <linux/sockios.h>
121 #include <linux/in.h>
122 #include <linux/inet.h>
123 #include <linux/inetdevice.h>
124 #include <linux/netdevice.h>
125 #include <linux/etherdevice.h>
126 #include <linux/indirect_call_wrapper.h>
127 
128 #include <net/snmp.h>
129 #include <net/ip.h>
130 #include <net/protocol.h>
131 #include <net/route.h>
132 #include <linux/skbuff.h>
133 #include <net/sock.h>
134 #include <net/arp.h>
135 #include <net/icmp.h>
136 #include <net/raw.h>
137 #include <net/checksum.h>
138 #include <net/inet_ecn.h>
139 #include <linux/netfilter_ipv4.h>
140 #include <net/xfrm.h>
141 #include <linux/mroute.h>
142 #include <linux/netlink.h>
143 #include <net/dst_metadata.h>
144 
145 /*
146  *	Process Router Attention IP option (RFC 2113)
147  */
148 bool ip_call_ra_chain(struct sk_buff *skb)
149 {
150 	struct ip_ra_chain *ra;
151 	u8 protocol = ip_hdr(skb)->protocol;
152 	struct sock *last = NULL;
153 	struct net_device *dev = skb->dev;
154 	struct net *net = dev_net(dev);
155 
156 	for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
157 		struct sock *sk = ra->sk;
158 
159 		/* If socket is bound to an interface, only report
160 		 * the packet if it came from that interface.
161 		 */
162 		if (sk && inet_sk(sk)->inet_num == protocol &&
163 		    (!sk->sk_bound_dev_if ||
164 		     sk->sk_bound_dev_if == dev->ifindex)) {
165 			if (ip_is_fragment(ip_hdr(skb))) {
166 				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
167 					return true;
168 			}
169 			if (last) {
170 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
171 				if (skb2)
172 					raw_rcv(last, skb2);
173 			}
174 			last = sk;
175 		}
176 	}
177 
178 	if (last) {
179 		raw_rcv(last, skb);
180 		return true;
181 	}
182 	return false;
183 }
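/*
 * Example (userspace sketch): the sockets walked above are placed on
 * net->ipv4.ra_chain by ip_ra_control(), reached through the
 * IP_ROUTER_ALERT option on a raw socket.  A minimal consumer, such as
 * an RSVP-style daemon, could set this up as follows; the choice of
 * IPPROTO_RSVP is illustrative and error handling is trimmed.
 */
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int open_router_alert_socket(void)
{
	int on = 1;
	/* Raw sockets need CAP_NET_RAW; the protocol must not be IPPROTO_RAW. */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);

	if (fd < 0)
		return -1;
	/* This inserts the socket into the per-netns ra_chain walked above. */
	if (setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* recvfrom() now yields Router Alert tagged packets */
}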
184 
185 INDIRECT_CALLABLE_DECLARE(int udp_rcv(struct sk_buff *));
186 INDIRECT_CALLABLE_DECLARE(int tcp_v4_rcv(struct sk_buff *));
187 void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
188 {
189 	const struct net_protocol *ipprot;
190 	int raw, ret;
191 
192 resubmit:
193 	raw = raw_local_deliver(skb, protocol);
194 
195 	ipprot = rcu_dereference(inet_protos[protocol]);
196 	if (ipprot) {
197 		if (!ipprot->no_policy) {
198 			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
199 				kfree_skb_reason(skb,
200 						 SKB_DROP_REASON_XFRM_POLICY);
201 				return;
202 			}
203 			nf_reset_ct(skb);
204 		}
205 		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
206 				      skb);
207 		if (ret < 0) {
208 			protocol = -ret;
209 			goto resubmit;
210 		}
211 		__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
212 	} else {
213 		if (!raw) {
214 			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
215 				__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
216 				icmp_send(skb, ICMP_DEST_UNREACH,
217 					  ICMP_PROT_UNREACH, 0);
218 			}
219 			kfree_skb_reason(skb, SKB_DROP_REASON_IP_NOPROTO);
220 		} else {
221 			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
222 			consume_skb(skb);
223 		}
224 	}
225 }
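/*
 * Example (kernel-side sketch): the handlers dispatched above are
 * registered with inet_add_protocol().  A hypothetical module claiming
 * the experimental protocol number 253 (RFC 3692) could hook in as
 * follows; the "foo" names are illustrative.  Returning 0 means the skb
 * was consumed, while returning -N asks this function to resubmit the
 * packet as protocol N (the resubmit path above).
 */
static int foo_rcv(struct sk_buff *skb)
{
	/* ...parse the payload, hand it to a socket, etc... */
	consume_skb(skb);
	return 0;
}

static const struct net_protocol foo_protocol = {
	.handler	= foo_rcv,
	.no_policy	= 1,	/* skip the xfrm policy check above */
};

static int __init foo_proto_init(void)
{
	/* Fails if another handler already owns protocol 253. */
	return inet_add_protocol(&foo_protocol, 253) ? -EBUSY : 0;
}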
226 
227 static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
228 {
229 	skb_clear_delivery_time(skb);
230 	__skb_pull(skb, skb_network_header_len(skb));
231 
232 	rcu_read_lock();
233 	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
234 	rcu_read_unlock();
235 
236 	return 0;
237 }
238 
239 /*
240  * 	Deliver IP Packets to the higher protocol layers.
241  */
242 int ip_local_deliver(struct sk_buff *skb)
243 {
244 	/*
245 	 *	Reassemble IP fragments.
246 	 */
247 	struct net *net = dev_net(skb->dev);
248 
249 	if (ip_is_fragment(ip_hdr(skb))) {
250 		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
251 			return 0;
252 	}
253 
254 	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
255 		       net, NULL, skb, skb->dev, NULL,
256 		       ip_local_deliver_finish);
257 }
258 EXPORT_SYMBOL(ip_local_deliver);
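/*
 * Example (kernel-side sketch): because local delivery runs through the
 * NF_INET_LOCAL_IN hook above, a module can observe or drop packets for
 * this host before ip_local_deliver_finish() executes.  The "watch"
 * names and the fixed use of init_net below are illustrative.
 */
static unsigned int watch_local_in(void *priv, struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	/* Inspect ip_hdr(skb) here; returning NF_DROP discards the packet. */
	return NF_ACCEPT;
}

static const struct nf_hook_ops watch_ops = {
	.hook		= watch_local_in,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= NF_IP_PRI_FILTER,
};

static int __init watch_init(void)
{
	return nf_register_net_hook(&init_net, &watch_ops);
}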
259 
260 static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
261 {
262 	struct ip_options *opt;
263 	const struct iphdr *iph;
264 
265 	/* It looks as overkill, because not all
266 	   IP options require packet mangling.
267 	   But it is the easiest for now, especially taking
268 	   into account that combination of IP options
269 	   and running sniffer is extremely rare condition.
270 					      --ANK (980813)
271 	*/
272 	if (skb_cow(skb, skb_headroom(skb))) {
273 		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
274 		goto drop;
275 	}
276 
277 	iph = ip_hdr(skb);
278 	opt = &(IPCB(skb)->opt);
279 	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
280 
281 	if (ip_options_compile(dev_net(dev), opt, skb)) {
282 		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
283 		goto drop;
284 	}
285 
286 	if (unlikely(opt->srr)) {
287 		struct in_device *in_dev = __in_dev_get_rcu(dev);
288 
289 		if (in_dev) {
290 			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
291 				if (IN_DEV_LOG_MARTIANS(in_dev))
292 					net_info_ratelimited("source route option %pI4 -> %pI4\n",
293 							     &iph->saddr,
294 							     &iph->daddr);
295 				goto drop;
296 			}
297 		}
298 
299 		if (ip_options_rcv_srr(skb, dev))
300 			goto drop;
301 	}
302 
303 	return false;
304 drop:
305 	return true;
306 }
307 
308 static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
309 			    const struct sk_buff *hint)
310 {
311 	return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr &&
312 	       ip_hdr(hint)->tos == iph->tos;
313 }
314 
315 int tcp_v4_early_demux(struct sk_buff *skb);
316 int udp_v4_early_demux(struct sk_buff *skb);
317 static int ip_rcv_finish_core(struct net *net, struct sock *sk,
318 			      struct sk_buff *skb, struct net_device *dev,
319 			      const struct sk_buff *hint)
320 {
321 	const struct iphdr *iph = ip_hdr(skb);
322 	int err, drop_reason;
323 	struct rtable *rt;
324 
325 	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
326 
327 	if (ip_can_use_hint(skb, iph, hint)) {
328 		err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos,
329 					dev, hint);
330 		if (unlikely(err))
331 			goto drop_error;
332 	}
333 
334 	if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
335 	    !skb_dst(skb) &&
336 	    !skb->sk &&
337 	    !ip_is_fragment(iph)) {
338 		switch (iph->protocol) {
339 		case IPPROTO_TCP:
340 			if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) {
341 				tcp_v4_early_demux(skb);
342 
343 				/* must reload iph, skb->head might have changed */
344 				iph = ip_hdr(skb);
345 			}
346 			break;
347 		case IPPROTO_UDP:
348 			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
349 				err = udp_v4_early_demux(skb);
350 				if (unlikely(err))
351 					goto drop_error;
352 
353 				/* must reload iph, skb->head might have changed */
354 				iph = ip_hdr(skb);
355 			}
356 			break;
357 		}
358 	}
359 
360 	/*
361 	 *	Initialise the virtual path cache for the packet. It describes
362 	 *	how the packet travels inside Linux networking.
363 	 */
364 	if (!skb_valid_dst(skb)) {
365 		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
366 					   iph->tos, dev);
367 		if (unlikely(err))
368 			goto drop_error;
369 	}
370 
371 #ifdef CONFIG_IP_ROUTE_CLASSID
372 	if (unlikely(skb_dst(skb)->tclassid)) {
373 		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
374 		u32 idx = skb_dst(skb)->tclassid;
375 		st[idx&0xFF].o_packets++;
376 		st[idx&0xFF].o_bytes += skb->len;
377 		st[(idx>>16)&0xFF].i_packets++;
378 		st[(idx>>16)&0xFF].i_bytes += skb->len;
379 	}
380 #endif
381 
382 	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
383 		goto drop;
384 
385 	rt = skb_rtable(skb);
386 	if (rt->rt_type == RTN_MULTICAST) {
387 		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
388 	} else if (rt->rt_type == RTN_BROADCAST) {
389 		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
390 	} else if (skb->pkt_type == PACKET_BROADCAST ||
391 		   skb->pkt_type == PACKET_MULTICAST) {
392 		struct in_device *in_dev = __in_dev_get_rcu(dev);
393 
394 		/* RFC 1122 3.3.6:
395 		 *
396 		 *   When a host sends a datagram to a link-layer broadcast
397 		 *   address, the IP destination address MUST be a legal IP
398 		 *   broadcast or IP multicast address.
399 		 *
400 		 *   A host SHOULD silently discard a datagram that is received
401 		 *   via a link-layer broadcast (see Section 2.4) but does not
402 		 *   specify an IP multicast or broadcast destination address.
403 		 *
404 		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
405 		 * in a way a form of multicast and the most common use case for
406 		 * this is 802.11 protecting against cross-station spoofing (the
407 		 * so-called "hole-196" attack) so do it for both.
408 		 */
409 		if (in_dev &&
410 		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST)) {
411 			drop_reason = SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST;
412 			goto drop;
413 		}
414 	}
415 
416 	return NET_RX_SUCCESS;
417 
418 drop:
419 	kfree_skb_reason(skb, drop_reason);
420 	return NET_RX_DROP;
421 
422 drop_error:
423 	if (err == -EXDEV) {
424 		drop_reason = SKB_DROP_REASON_IP_RPFILTER;
425 		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
426 	}
427 	goto drop;
428 }
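/*
 * Example (userspace sketch): the early-demux branch above is controlled
 * by the net.ipv4.ip_early_demux, net.ipv4.tcp_early_demux and
 * net.ipv4.udp_early_demux sysctls.  A small helper that toggles one of
 * them through procfs could look like this; error handling is minimal.
 */
#include <fcntl.h>
#include <unistd.h>

static int set_bool_sysctl(const char *path, int enable)
{
	const char *val = enable ? "1\n" : "0\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, 2) != 2) {
		close(fd);
		return -1;
	}
	return close(fd);
}

/* e.g. set_bool_sysctl("/proc/sys/net/ipv4/tcp_early_demux", 0); */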
429 
430 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
431 {
432 	struct net_device *dev = skb->dev;
433 	int ret;
434 
435 	/* if ingress device is enslaved to an L3 master device pass the
436 	 * skb to its handler for processing
437 	 */
438 	skb = l3mdev_ip_rcv(skb);
439 	if (!skb)
440 		return NET_RX_SUCCESS;
441 
442 	ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
443 	if (ret != NET_RX_DROP)
444 		ret = dst_input(skb);
445 	return ret;
446 }
447 
448 /*
449  * 	Main IP Receive routine.
450  */
451 static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
452 {
453 	const struct iphdr *iph;
454 	int drop_reason;
455 	u32 len;
456 
457 	/* When the interface is in promisc. mode, drop all the crap
458 	 * that it receives, do not try to analyse it.
459 	 */
460 	if (skb->pkt_type == PACKET_OTHERHOST) {
461 		dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
462 		drop_reason = SKB_DROP_REASON_OTHERHOST;
463 		goto drop;
464 	}
465 
466 	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
467 
468 	skb = skb_share_check(skb, GFP_ATOMIC);
469 	if (!skb) {
470 		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
471 		goto out;
472 	}
473 
474 	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
475 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
476 		goto inhdr_error;
477 
478 	iph = ip_hdr(skb);
479 
480 	/*
481 	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
482 	 *
483 	 *	Is the datagram acceptable?
484 	 *
485 	 *	1.	Length at least the size of an ip header
486 	 *	2.	Version of 4
487 	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
488 	 *	4.	Doesn't have a bogus length
489 	 */
490 
491 	if (iph->ihl < 5 || iph->version != 4)
492 		goto inhdr_error;
493 
494 	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
495 	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
496 	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
497 	__IP_ADD_STATS(net,
498 		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
499 		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
500 
501 	if (!pskb_may_pull(skb, iph->ihl*4))
502 		goto inhdr_error;
503 
504 	iph = ip_hdr(skb);
505 
506 	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
507 		goto csum_error;
508 
509 	len = ntohs(iph->tot_len);
510 	if (skb->len < len) {
511 		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
512 		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
513 		goto drop;
514 	} else if (len < (iph->ihl*4))
515 		goto inhdr_error;
516 
517 	/* Our transport medium may have padded the buffer out. Now we know it
518 	 * is IP we can trim to the true length of the frame.
519 	 * Note this now means skb->len holds ntohs(iph->tot_len).
520 	 */
521 	if (pskb_trim_rcsum(skb, len)) {
522 		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
523 		goto drop;
524 	}
525 
526 	iph = ip_hdr(skb);
527 	skb->transport_header = skb->network_header + iph->ihl*4;
528 
529 	/* Remove any debris in the socket control block */
530 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
531 	IPCB(skb)->iif = skb->skb_iif;
532 
533 	/* Must drop socket now because of tproxy. */
534 	if (!skb_sk_is_prefetched(skb))
535 		skb_orphan(skb);
536 
537 	return skb;
538 
539 csum_error:
540 	drop_reason = SKB_DROP_REASON_IP_CSUM;
541 	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
542 inhdr_error:
543 	if (drop_reason == SKB_DROP_REASON_NOT_SPECIFIED)
544 		drop_reason = SKB_DROP_REASON_IP_INHDR;
545 	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
546 drop:
547 	kfree_skb_reason(skb, drop_reason);
548 out:
549 	return NULL;
550 }
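/*
 * Example (userspace sketch): the checks above, header length, version,
 * RFC 1071 header checksum and total length, can be mirrored when
 * parsing raw captures outside the kernel.  A minimal validator follows;
 * it is a sketch, not a drop-in replacement for ip_fast_csum().
 */
#include <arpa/inet.h>
#include <netinet/ip.h>
#include <stddef.h>
#include <stdint.h>

static int ipv4_header_ok(const uint8_t *pkt, size_t caplen)
{
	const struct iphdr *iph = (const struct iphdr *)pkt;
	uint32_t sum = 0;
	size_t hlen, i;

	if (caplen < sizeof(*iph) || iph->version != 4 || iph->ihl < 5)
		return 0;
	hlen = iph->ihl * 4;
	if (caplen < hlen || ntohs(iph->tot_len) < hlen ||
	    ntohs(iph->tot_len) > caplen)
		return 0;	/* bogus or truncated length */

	/* RFC 1071: one's-complement sum of the header, 16 bits at a time. */
	for (i = 0; i < hlen; i += 2)
		sum += (uint32_t)((pkt[i] << 8) | pkt[i + 1]);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum == 0;	/* a valid header sums to 0xffff */
}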
551 
552 /*
553  * IP receive entry point
554  */
555 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
556 	   struct net_device *orig_dev)
557 {
558 	struct net *net = dev_net(dev);
559 
560 	skb = ip_rcv_core(skb, net);
561 	if (skb == NULL)
562 		return NET_RX_DROP;
563 
564 	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
565 		       net, NULL, skb, dev, NULL,
566 		       ip_rcv_finish);
567 }
568 
569 static void ip_sublist_rcv_finish(struct list_head *head)
570 {
571 	struct sk_buff *skb, *next;
572 
573 	list_for_each_entry_safe(skb, next, head, list) {
574 		skb_list_del_init(skb);
575 		dst_input(skb);
576 	}
577 }
578 
579 static struct sk_buff *ip_extract_route_hint(const struct net *net,
580 					     struct sk_buff *skb, int rt_type)
581 {
582 	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
583 		return NULL;
584 
585 	return skb;
586 }
587 
588 static void ip_list_rcv_finish(struct net *net, struct sock *sk,
589 			       struct list_head *head)
590 {
591 	struct sk_buff *skb, *next, *hint = NULL;
592 	struct dst_entry *curr_dst = NULL;
593 	struct list_head sublist;
594 
595 	INIT_LIST_HEAD(&sublist);
596 	list_for_each_entry_safe(skb, next, head, list) {
597 		struct net_device *dev = skb->dev;
598 		struct dst_entry *dst;
599 
600 		skb_list_del_init(skb);
601 		/* if ingress device is enslaved to an L3 master device pass the
602 		 * skb to its handler for processing
603 		 */
604 		skb = l3mdev_ip_rcv(skb);
605 		if (!skb)
606 			continue;
607 		if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
608 			continue;
609 
610 		dst = skb_dst(skb);
611 		if (curr_dst != dst) {
612 			hint = ip_extract_route_hint(net, skb,
613 					       ((struct rtable *)dst)->rt_type);
614 
615 			/* dispatch old sublist */
616 			if (!list_empty(&sublist))
617 				ip_sublist_rcv_finish(&sublist);
618 			/* start new sublist */
619 			INIT_LIST_HEAD(&sublist);
620 			curr_dst = dst;
621 		}
622 		list_add_tail(&skb->list, &sublist);
623 	}
624 	/* dispatch final sublist */
625 	ip_sublist_rcv_finish(&sublist);
626 }
627 
628 static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
629 			   struct net *net)
630 {
631 	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
632 		     head, dev, NULL, ip_rcv_finish);
633 	ip_list_rcv_finish(net, NULL, head);
634 }
635 
636 /* Receive a list of IP packets */
637 void ip_list_rcv(struct list_head *head, struct packet_type *pt,
638 		 struct net_device *orig_dev)
639 {
640 	struct net_device *curr_dev = NULL;
641 	struct net *curr_net = NULL;
642 	struct sk_buff *skb, *next;
643 	struct list_head sublist;
644 
645 	INIT_LIST_HEAD(&sublist);
646 	list_for_each_entry_safe(skb, next, head, list) {
647 		struct net_device *dev = skb->dev;
648 		struct net *net = dev_net(dev);
649 
650 		skb_list_del_init(skb);
651 		skb = ip_rcv_core(skb, net);
652 		if (skb == NULL)
653 			continue;
654 
655 		if (curr_dev != dev || curr_net != net) {
656 			/* dispatch old sublist */
657 			if (!list_empty(&sublist))
658 				ip_sublist_rcv(&sublist, curr_dev, curr_net);
659 			/* start new sublist */
660 			INIT_LIST_HEAD(&sublist);
661 			curr_dev = dev;
662 			curr_net = net;
663 		}
664 		list_add_tail(&skb->list, &sublist);
665 	}
666 	/* dispatch final sublist */
667 	if (!list_empty(&sublist))
668 		ip_sublist_rcv(&sublist, curr_dev, curr_net);
669 }
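/*
 * Example (kernel-side sketch): ip_rcv() and ip_list_rcv() are not called
 * directly by drivers; af_inet.c exposes them to the core stack through a
 * struct packet_type for ETH_P_IP, roughly as below (the real registration
 * happens in inet_init(); the init wrapper name here is illustrative).
 */
static struct packet_type ip_packet_type __read_mostly = {
	.type		= cpu_to_be16(ETH_P_IP),
	.func		= ip_rcv,
	.list_func	= ip_list_rcv,
};

static void __init ip_packet_type_register(void)
{
	dev_add_pack(&ip_packet_type);
}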
670