// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 input
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Ian P. Morris		<I.P.Morris@soton.ac.uk>
 *
 *	Based on linux/net/ipv4/ip_input.c
 */
/* Changes
 *
 *	Mitsuru KANDA @USAGI and
 *	YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/mroute6.h>
#include <linux/slab.h>
#include <linux/indirect_call_wrapper.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>
#include <net/udp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/inet_ecn.h>
#include <net/dst_metadata.h>

static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
	    !skb_dst(skb) && !skb->sk) {
		switch (ipv6_hdr(skb)->nexthdr) {
		case IPPROTO_TCP:
			if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
				tcp_v6_early_demux(skb);
			break;
		case IPPROTO_UDP:
			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
				udp_v6_early_demux(skb);
			break;
		}
	}

	if (!skb_valid_dst(skb))
		ip6_route_input(skb);
}

int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;
	ip6_rcv_finish_core(net, sk, skb);

	return dst_input(skb);
}

static void ip6_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		dst_input(skb);
	}
}

static bool ip6_can_use_hint(const struct sk_buff *skb,
			     const struct sk_buff *hint)
{
	return hint && !skb_dst(skb) &&
	       ipv6_addr_equal(&ipv6_hdr(hint)->daddr, &ipv6_hdr(skb)->daddr);
}

static struct sk_buff *ip6_extract_route_hint(const struct net *net,
					      struct sk_buff *skb)
{
	if (fib6_routes_require_src(net) || fib6_has_custom_rules(net))
		return NULL;

	return skb;
}

static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
				struct list_head *head)
{
	struct sk_buff *skb, *next, *hint = NULL;
	struct dst_entry *curr_dst = NULL;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_rcv(skb);
		if (!skb)
			continue;

		if (ip6_can_use_hint(skb, hint))
			skb_dst_copy(skb, hint);
		else
			ip6_rcv_finish_core(net, sk, skb);
		dst = skb_dst(skb);
		if (curr_dst != dst) {
			hint = ip6_extract_route_hint(net, skb);

			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip6_sublist_rcv_finish(&sublist);
}
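/* Per-packet sanity checks shared by ipv6_rcv() and ipv6_list_rcv():
 * validate the IPv6 header, enforce the RFC 4291 addressing rules,
 * trim the skb to the stated payload length and parse a leading
 * Hop-by-Hop options header.  Returns the (possibly unshared) skb on
 * success, or NULL if the packet was dropped and accounted here.
 */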
static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
				    struct net *net)
{
	enum skb_drop_reason reason;
	const struct ipv6hdr *hdr;
	u32 pkt_len;
	struct inet6_dev *idev;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
		kfree_skb_reason(skb, SKB_DROP_REASON_OTHERHOST);
		return NULL;
	}

	rcu_read_lock();

	idev = __in6_dev_get(skb->dev);

	__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);

	SKB_DR_SET(reason, NOT_SPECIFIED);
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
	    !idev || unlikely(idev->cnf.disable_ipv6)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		if (idev && unlikely(idev->cnf.disable_ipv6))
			SKB_DR_SET(reason, IPV6DISABLED);
		goto drop;
	}

	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));

	/*
	 * Store the incoming device index. Once the packet has been
	 * queued, we cannot refer to skb->dev anymore.
	 *
	 * BTW, when we send a packet for our own local address on a
	 * non-loopback interface (e.g. ethX), it is being delivered
	 * via the loopback interface (lo) here; skb->dev = loopback_dev.
	 * It should, however, be treated as if it arrived via the
	 * sending interface (ethX), because of the nature of the
	 * scoping architecture. --yoshfuji
	 */
	IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;

	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
		goto err;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6) {
		SKB_DR_SET(reason, UNHANDLED_PROTO);
		goto err;
	}

	__IP6_ADD_STATS(net, idev,
			IPSTATS_MIB_NOECTPKTS +
				(ipv6_get_dsfield(hdr) & INET_ECN_MASK),
			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
	/*
	 * RFC4291 2.5.3
	 * The loopback address must not be used as the source address in IPv6
	 * packets that are sent outside of a single node. [..]
	 * A packet received on an interface with a destination address
	 * of loopback must be dropped.
	 */
	if ((ipv6_addr_loopback(&hdr->saddr) ||
	     ipv6_addr_loopback(&hdr->daddr)) &&
	    !(dev->flags & IFF_LOOPBACK) &&
	    !netif_is_l3_master(dev))
		goto err;

	/* RFC4291 Errata ID: 3480
	 * Interface-Local scope spans only a single interface on a
	 * node and is useful only for loopback transmission of
	 * multicast.  Packets with interface-local scope received
	 * from another node must be discarded.
	 */
	if (!(skb->pkt_type == PACKET_LOOPBACK ||
	      dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
		goto err;

	/* If enabled, drop unicast packets that were encapsulated in link-layer
	 * multicast or broadcast to protect against the so-called "hole-196"
	 * attack in 802.11 wireless.
	 */
	if (!ipv6_addr_is_multicast(&hdr->daddr) &&
	    (skb->pkt_type == PACKET_BROADCAST ||
	     skb->pkt_type == PACKET_MULTICAST) &&
	    idev->cnf.drop_unicast_in_l2_multicast) {
		SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
		goto err;
	}

	/* RFC4291 2.7
	 * Nodes must not originate a packet to a multicast address whose scope
	 * field contains the reserved value 0; if such a packet is received, it
	 * must be silently dropped.
	 */
	if (ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
		goto err;

	/*
	 * RFC4291 2.7
	 * Multicast addresses must not be used as source addresses in IPv6
	 * packets or appear in any Routing header.
	 */
	if (ipv6_addr_is_multicast(&hdr->saddr))
		goto err;

	skb->transport_header = skb->network_header + sizeof(*hdr);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	pkt_len = ntohs(hdr->payload_len);

	/* pkt_len may be zero if Jumbo payload option is present */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
			__IP6_INC_STATS(net,
					idev, IPSTATS_MIB_INTRUNCATEDPKTS);
			SKB_DR_SET(reason, PKT_TOO_SMALL);
			goto drop;
		}
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			goto err;
		hdr = ipv6_hdr(skb);
	}

	if (hdr->nexthdr == NEXTHDR_HOP) {
		if (ipv6_parse_hopopts(skb) < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			rcu_read_unlock();
			return NULL;
		}
	}

	rcu_read_unlock();

	/* Must drop socket now because of tproxy. */
	if (!skb_sk_is_prefetched(skb))
		skb_orphan(skb);

	return skb;
err:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
	SKB_DR_OR(reason, IP_INHDR);
drop:
	rcu_read_unlock();
	kfree_skb_reason(skb, reason);
	return NULL;
}
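/* packet_type ->func handler for a single IPv6 packet: validate it with
 * ip6_rcv_core() and hand it to the NF_INET_PRE_ROUTING hook, with
 * ip6_rcv_finish() as the continuation that routes the packet.
 */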
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct net *net = dev_net(skb->dev);

	skb = ip6_rcv_core(skb, dev, net);
	if (skb == NULL)
		return NET_RX_DROP;
	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip6_rcv_finish);
}

static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev,
			    struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip6_rcv_finish);
	ip6_list_rcv_finish(net, NULL, head);
}

/* Receive a list of IPv6 packets */
void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
		   struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip6_rcv_core(skb, dev, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	if (!list_empty(&sublist))
		ip6_sublist_rcv(&sublist, curr_dev, curr_net);
}

INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *));

/*
 *	Deliver the packet to the host
 */
void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
			      bool have_final)
{
	const struct inet6_protocol *ipprot;
	struct inet6_dev *idev;
	unsigned int nhoff;
	SKB_DR(reason);
	bool raw;

	/*
	 *	Parse extension headers
	 */

resubmit:
	idev = ip6_dst_idev(skb_dst(skb));
	nhoff = IP6CB(skb)->nhoff;
	if (!have_final) {
		if (!pskb_pull(skb, skb_transport_offset(skb)))
			goto discard;
		nexthdr = skb_network_header(skb)[nhoff];
	}

resubmit_final:
	raw = raw6_local_deliver(skb, nexthdr);
	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot) {
		int ret;

		if (have_final) {
			if (!(ipprot->flags & INET6_PROTO_FINAL)) {
				/* Once we've seen a final protocol don't
				 * allow encapsulation on any non-final
				 * ones. This allows foo in UDP encapsulation
				 * to work.
				 */
				goto discard;
			}
		} else if (ipprot->flags & INET6_PROTO_FINAL) {
			const struct ipv6hdr *hdr;
			int sdif = inet6_sdif(skb);
			struct net_device *dev;

			/* Only do this once for first final protocol */
			have_final = true;

			/* Free reference early: we don't need it any more,
			   and it may hold ip_conntrack module loaded
			   indefinitely. */
			nf_reset_ct(skb);

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			hdr = ipv6_hdr(skb);

			/* skb->dev passed may be master dev for vrfs. */
			if (sdif) {
				dev = dev_get_by_index_rcu(net, sdif);
				if (!dev)
					goto discard;
			} else {
				dev = skb->dev;
			}

			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(dev, &hdr->daddr,
						 &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) {
				SKB_DR_SET(reason, IP_INADDRERRORS);
				goto discard;
			}
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			SKB_DR_SET(reason, XFRM_POLICY);
			goto discard;
		}

		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv,
				      skb);
		if (ret > 0) {
			if (ipprot->flags & INET6_PROTO_FINAL) {
				/* Not an extension header, most likely UDP
				 * encapsulation. Use return value as nexthdr
				 * protocol not nhoff (which presumably is
				 * not set by handler).
				 */
				nexthdr = ret;
				goto resubmit_final;
			} else {
				goto resubmit;
			}
		} else if (ret == 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
		}
	} else {
		if (!raw) {
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP6_INC_STATS(net, idev,
						IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_UNK_NEXTHDR, nhoff);
				SKB_DR_SET(reason, IP_NOPROTO);
			} else {
				SKB_DR_SET(reason, XFRM_POLICY);
			}
			kfree_skb_reason(skb, reason);
		} else {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
	return;

discard:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
	kfree_skb_reason(skb, reason);
}
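/* NF_INET_LOCAL_IN okfn: under RCU protection, deliver a packet destined
 * to this host to the matching transport protocol handler.
 */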
static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_clear_delivery_time(skb);
	rcu_read_lock();
	ip6_protocol_deliver_rcu(net, skb, 0, false);
	rcu_read_unlock();

	return 0;
}

int ip6_input(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
		       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
		       ip6_input_finish);
}
EXPORT_SYMBOL_GPL(ip6_input);
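/* Input handler for packets with a multicast destination address: decide
 * whether the packet is delivered locally, handed to the IPv6 multicast
 * routing code (CONFIG_IPV6_MROUTE), or both.
 */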
int ip6_mc_input(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct ipv6hdr *hdr;
	struct net_device *dev;
	bool deliver;

	__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
			   __in6_dev_get_safely(skb->dev), IPSTATS_MIB_INMCAST,
			   skb->len);

	/* skb->dev passed may be master dev for vrfs. */
	if (sdif) {
		rcu_read_lock();
		dev = dev_get_by_index_rcu(dev_net(skb->dev), sdif);
		if (!dev) {
			rcu_read_unlock();
			kfree_skb(skb);
			return -ENODEV;
		}
	} else {
		dev = skb->dev;
	}

	hdr = ipv6_hdr(skb);
	deliver = ipv6_chk_mcast_addr(dev, &hdr->daddr, NULL);
	if (sdif)
		rcu_read_unlock();

#ifdef CONFIG_IPV6_MROUTE
	/*
	 *	IPv6 multicast router mode is now supported ;)
	 */
	if (atomic_read(&dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding) &&
	    !(ipv6_addr_type(&hdr->daddr) &
	      (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
		/*
		 * Okay, we try to forward - split and duplicate
		 * packets.
		 */
		struct sk_buff *skb2;
		struct inet6_skb_parm *opt = IP6CB(skb);

		/* Check for MLD */
		if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
			/* Check if this is a mld message */
			u8 nexthdr = hdr->nexthdr;
			__be16 frag_off;
			int offset;

			/* Check if the value of Router Alert
			 * is for MLD (0x0000).
			 */
			if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) {
				deliver = false;

				if (!ipv6_ext_hdr(nexthdr)) {
					/* BUG */
					goto out;
				}
				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
							  &nexthdr, &frag_off);
				if (offset < 0)
					goto out;

				if (ipv6_is_mld(skb, nexthdr, offset))
					deliver = true;

				goto out;
			}
			/* unknown RA - process it normally */
		}

		if (deliver)
			skb2 = skb_clone(skb, GFP_ATOMIC);
		else {
			skb2 = skb;
			skb = NULL;
		}

		if (skb2) {
			ip6_mr_input(skb2);
		}
	}
out:
#endif
	if (likely(deliver))
		ip6_input(skb);
	else {
		/* discard */
		kfree_skb(skb);
	}

	return 0;
}