1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * NET3: Implementation of the ICMP protocol layer. 4 * 5 * Alan Cox, <alan@lxorguk.ukuu.org.uk> 6 * 7 * Some of the function names and the icmp unreach table for this 8 * module were derived from [icmp.c 1.0.11 06/02/93] by 9 * Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting. 10 * Other than that this module is a complete rewrite. 11 * 12 * Fixes: 13 * Clemens Fruhwirth : introduce global icmp rate limiting 14 * with icmp type masking ability instead 15 * of broken per type icmp timeouts. 16 * Mike Shaver : RFC1122 checks. 17 * Alan Cox : Multicast ping reply as self. 18 * Alan Cox : Fix atomicity lockup in ip_build_xmit 19 * call. 20 * Alan Cox : Added 216,128 byte paths to the MTU 21 * code. 22 * Martin Mares : RFC1812 checks. 23 * Martin Mares : Can be configured to follow redirects 24 * if acting as a router _without_ a 25 * routing protocol (RFC 1812). 26 * Martin Mares : Echo requests may be configured to 27 * be ignored (RFC 1812). 28 * Martin Mares : Limitation of ICMP error message 29 * transmit rate (RFC 1812). 30 * Martin Mares : TOS and Precedence set correctly 31 * (RFC 1812). 32 * Martin Mares : Now copying as much data from the 33 * original packet as we can without 34 * exceeding 576 bytes (RFC 1812). 35 * Willy Konynenberg : Transparent proxying support. 36 * Keith Owens : RFC1191 correction for 4.2BSD based 37 * path MTU bug. 38 * Thomas Quinot : ICMP Dest Unreach codes up to 15 are 39 * valid (RFC 1812). 40 * Andi Kleen : Check all packet lengths properly 41 * and moved all kfree_skb() up to 42 * icmp_rcv. 43 * Andi Kleen : Move the rate limit bookkeeping 44 * into the dest entry and use a token 45 * bucket filter (thanks to ANK). Make 46 * the rates sysctl configurable. 47 * Yu Tianli : Fixed two ugly bugs in icmp_send 48 * - IP option length was accounted wrongly 49 * - ICMP header length was not accounted 50 * at all. 51 * Tristan Greaves : Added sysctl option to ignore bogus 52 * broadcast responses from broken routers. 53 * 54 * To Fix: 55 * 56 * - Should use skb_pull() instead of all the manual checking. 57 * This would also greatly simply some upper layer error handlers. --AK 58 */ 59 60 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 61 62 #include <linux/module.h> 63 #include <linux/types.h> 64 #include <linux/jiffies.h> 65 #include <linux/kernel.h> 66 #include <linux/fcntl.h> 67 #include <linux/socket.h> 68 #include <linux/in.h> 69 #include <linux/inet.h> 70 #include <linux/inetdevice.h> 71 #include <linux/netdevice.h> 72 #include <linux/string.h> 73 #include <linux/netfilter_ipv4.h> 74 #include <linux/slab.h> 75 #include <net/flow.h> 76 #include <net/snmp.h> 77 #include <net/ip.h> 78 #include <net/route.h> 79 #include <net/protocol.h> 80 #include <net/icmp.h> 81 #include <net/tcp.h> 82 #include <net/udp.h> 83 #include <net/raw.h> 84 #include <net/ping.h> 85 #include <linux/skbuff.h> 86 #include <net/sock.h> 87 #include <linux/errno.h> 88 #include <linux/timer.h> 89 #include <linux/init.h> 90 #include <linux/uaccess.h> 91 #include <net/checksum.h> 92 #include <net/xfrm.h> 93 #include <net/inet_common.h> 94 #include <net/ip_fib.h> 95 #include <net/l3mdev.h> 96 #include <net/addrconf.h> 97 #include <net/inet_dscp.h> 98 #define CREATE_TRACE_POINTS 99 #include <trace/events/icmp.h> 100 101 /* 102 * Build xmit assembly blocks 103 */ 104 105 struct icmp_bxm { 106 struct sk_buff *skb; 107 int offset; 108 int data_len; 109 110 struct { 111 struct icmphdr icmph; 112 __be32 times[3]; 113 } data; 114 int head_len; 115 116 /* Must be last as it ends in a flexible-array member. */ 117 struct ip_options_rcu replyopts; 118 }; 119 120 /* An array of errno for error messages from dest unreach. */ 121 /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */ 122 123 const struct icmp_err icmp_err_convert[] = { 124 { 125 .errno = ENETUNREACH, /* ICMP_NET_UNREACH */ 126 .fatal = 0, 127 }, 128 { 129 .errno = EHOSTUNREACH, /* ICMP_HOST_UNREACH */ 130 .fatal = 0, 131 }, 132 { 133 .errno = ENOPROTOOPT /* ICMP_PROT_UNREACH */, 134 .fatal = 1, 135 }, 136 { 137 .errno = ECONNREFUSED, /* ICMP_PORT_UNREACH */ 138 .fatal = 1, 139 }, 140 { 141 .errno = EMSGSIZE, /* ICMP_FRAG_NEEDED */ 142 .fatal = 0, 143 }, 144 { 145 .errno = EOPNOTSUPP, /* ICMP_SR_FAILED */ 146 .fatal = 0, 147 }, 148 { 149 .errno = ENETUNREACH, /* ICMP_NET_UNKNOWN */ 150 .fatal = 1, 151 }, 152 { 153 .errno = EHOSTDOWN, /* ICMP_HOST_UNKNOWN */ 154 .fatal = 1, 155 }, 156 { 157 .errno = ENONET, /* ICMP_HOST_ISOLATED */ 158 .fatal = 1, 159 }, 160 { 161 .errno = ENETUNREACH, /* ICMP_NET_ANO */ 162 .fatal = 1, 163 }, 164 { 165 .errno = EHOSTUNREACH, /* ICMP_HOST_ANO */ 166 .fatal = 1, 167 }, 168 { 169 .errno = ENETUNREACH, /* ICMP_NET_UNR_TOS */ 170 .fatal = 0, 171 }, 172 { 173 .errno = EHOSTUNREACH, /* ICMP_HOST_UNR_TOS */ 174 .fatal = 0, 175 }, 176 { 177 .errno = EHOSTUNREACH, /* ICMP_PKT_FILTERED */ 178 .fatal = 1, 179 }, 180 { 181 .errno = EHOSTUNREACH, /* ICMP_PREC_VIOLATION */ 182 .fatal = 1, 183 }, 184 { 185 .errno = EHOSTUNREACH, /* ICMP_PREC_CUTOFF */ 186 .fatal = 1, 187 }, 188 }; 189 EXPORT_SYMBOL(icmp_err_convert); 190 191 /* 192 * ICMP control array. This specifies what to do with each ICMP. 193 */ 194 195 struct icmp_control { 196 enum skb_drop_reason (*handler)(struct sk_buff *skb); 197 short error; /* This ICMP is classed as an error message */ 198 }; 199 200 static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1]; 201 202 static DEFINE_PER_CPU(struct sock *, ipv4_icmp_sk); 203 204 /* Called with BH disabled */ 205 static inline struct sock *icmp_xmit_lock(struct net *net) 206 { 207 struct sock *sk; 208 209 sk = this_cpu_read(ipv4_icmp_sk); 210 211 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { 212 /* This can happen if the output path signals a 213 * dst_link_failure() for an outgoing ICMP packet. 214 */ 215 return NULL; 216 } 217 sock_net_set(sk, net); 218 return sk; 219 } 220 221 static inline void icmp_xmit_unlock(struct sock *sk) 222 { 223 sock_net_set(sk, &init_net); 224 spin_unlock(&sk->sk_lock.slock); 225 } 226 227 /** 228 * icmp_global_allow - Are we allowed to send one more ICMP message ? 229 * @net: network namespace 230 * 231 * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec. 232 * Returns false if we reached the limit and can not send another packet. 233 * Works in tandem with icmp_global_consume(). 234 */ 235 bool icmp_global_allow(struct net *net) 236 { 237 u32 delta, now, oldstamp; 238 int incr, new, old; 239 240 /* Note: many cpus could find this condition true. 241 * Then later icmp_global_consume() could consume more credits, 242 * this is an acceptable race. 243 */ 244 if (atomic_read(&net->ipv4.icmp_global_credit) > 0) 245 return true; 246 247 now = jiffies; 248 oldstamp = READ_ONCE(net->ipv4.icmp_global_stamp); 249 delta = min_t(u32, now - oldstamp, HZ); 250 if (delta < HZ / 50) 251 return false; 252 253 incr = READ_ONCE(net->ipv4.sysctl_icmp_msgs_per_sec); 254 incr = div_u64((u64)incr * delta, HZ); 255 if (!incr) 256 return false; 257 258 if (cmpxchg(&net->ipv4.icmp_global_stamp, oldstamp, now) == oldstamp) { 259 old = atomic_read(&net->ipv4.icmp_global_credit); 260 do { 261 new = min(old + incr, READ_ONCE(net->ipv4.sysctl_icmp_msgs_burst)); 262 } while (!atomic_try_cmpxchg(&net->ipv4.icmp_global_credit, &old, new)); 263 } 264 return true; 265 } 266 EXPORT_SYMBOL(icmp_global_allow); 267 268 void icmp_global_consume(struct net *net) 269 { 270 int credits = get_random_u32_below(3); 271 272 /* Note: this might make icmp_global.credit negative. */ 273 if (credits) 274 atomic_sub(credits, &net->ipv4.icmp_global_credit); 275 } 276 EXPORT_SYMBOL(icmp_global_consume); 277 278 static bool icmpv4_mask_allow(struct net *net, int type, int code) 279 { 280 if (type > NR_ICMP_TYPES) 281 return true; 282 283 /* Don't limit PMTU discovery. */ 284 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) 285 return true; 286 287 /* Limit if icmp type is enabled in ratemask. */ 288 if (!((1 << type) & READ_ONCE(net->ipv4.sysctl_icmp_ratemask))) 289 return true; 290 291 return false; 292 } 293 294 static bool icmpv4_global_allow(struct net *net, int type, int code, 295 bool *apply_ratelimit) 296 { 297 if (icmpv4_mask_allow(net, type, code)) 298 return true; 299 300 if (icmp_global_allow(net)) { 301 *apply_ratelimit = true; 302 return true; 303 } 304 __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL); 305 return false; 306 } 307 308 /* 309 * Send an ICMP frame. 310 */ 311 312 static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, 313 struct flowi4 *fl4, int type, int code, 314 bool apply_ratelimit) 315 { 316 struct dst_entry *dst = &rt->dst; 317 struct inet_peer *peer; 318 struct net_device *dev; 319 int peer_timeout; 320 bool rc = true; 321 322 if (!apply_ratelimit) 323 return true; 324 325 peer_timeout = READ_ONCE(net->ipv4.sysctl_icmp_ratelimit); 326 if (!peer_timeout) 327 goto out; 328 329 /* No rate limit on loopback */ 330 rcu_read_lock(); 331 dev = dst_dev_rcu(dst); 332 if (dev && (dev->flags & IFF_LOOPBACK)) 333 goto out_unlock; 334 335 peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 336 l3mdev_master_ifindex_rcu(dev)); 337 rc = inet_peer_xrlim_allow(peer, peer_timeout); 338 339 out_unlock: 340 rcu_read_unlock(); 341 out: 342 if (!rc) 343 __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST); 344 else 345 icmp_global_consume(net); 346 return rc; 347 } 348 349 /* 350 * Maintain the counters used in the SNMP statistics for outgoing ICMP 351 */ 352 void icmp_out_count(struct net *net, unsigned char type) 353 { 354 ICMPMSGOUT_INC_STATS(net, type); 355 ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS); 356 } 357 358 /* 359 * Checksum each fragment, and on the first include the headers and final 360 * checksum. 361 */ 362 static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd, 363 struct sk_buff *skb) 364 { 365 DEFINE_RAW_FLEX(struct icmp_bxm, icmp_param, replyopts.opt.__data, 366 IP_OPTIONS_DATA_FIXED_SIZE); 367 __wsum csum; 368 369 icmp_param = from; 370 371 csum = skb_copy_and_csum_bits(icmp_param->skb, 372 icmp_param->offset + offset, 373 to, len); 374 375 skb->csum = csum_block_add(skb->csum, csum, odd); 376 if (icmp_pointers[icmp_param->data.icmph.type].error) 377 nf_ct_attach(skb, icmp_param->skb); 378 return 0; 379 } 380 381 static void icmp_push_reply(struct sock *sk, 382 struct icmp_bxm *icmp_param, 383 struct flowi4 *fl4, 384 struct ipcm_cookie *ipc, struct rtable **rt) 385 { 386 struct sk_buff *skb; 387 388 if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param, 389 icmp_param->data_len+icmp_param->head_len, 390 icmp_param->head_len, 391 ipc, rt, MSG_DONTWAIT) < 0) { 392 __ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS); 393 ip_flush_pending_frames(sk); 394 } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { 395 struct icmphdr *icmph = icmp_hdr(skb); 396 __wsum csum; 397 struct sk_buff *skb1; 398 399 csum = csum_partial_copy_nocheck((void *)&icmp_param->data, 400 (char *)icmph, 401 icmp_param->head_len); 402 skb_queue_walk(&sk->sk_write_queue, skb1) { 403 csum = csum_add(csum, skb1->csum); 404 } 405 icmph->checksum = csum_fold(csum); 406 skb->ip_summed = CHECKSUM_NONE; 407 ip_push_pending_frames(sk, fl4); 408 } 409 } 410 411 /* 412 * Driving logic for building and sending ICMP messages. 413 */ 414 415 static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) 416 { 417 struct rtable *rt = skb_rtable(skb); 418 struct net *net = dev_net_rcu(rt->dst.dev); 419 bool apply_ratelimit = false; 420 struct ipcm_cookie ipc; 421 struct flowi4 fl4; 422 struct sock *sk; 423 __be32 daddr, saddr; 424 u32 mark = IP4_REPLY_MARK(net, skb->mark); 425 int type = icmp_param->data.icmph.type; 426 int code = icmp_param->data.icmph.code; 427 428 if (ip_options_echo(net, &icmp_param->replyopts.opt, skb)) 429 return; 430 431 /* Needed by both icmpv4_global_allow and icmp_xmit_lock */ 432 local_bh_disable(); 433 434 /* is global icmp_msgs_per_sec exhausted ? */ 435 if (!icmpv4_global_allow(net, type, code, &apply_ratelimit)) 436 goto out_bh_enable; 437 438 sk = icmp_xmit_lock(net); 439 if (!sk) 440 goto out_bh_enable; 441 442 icmp_param->data.icmph.checksum = 0; 443 444 ipcm_init(&ipc); 445 ipc.tos = ip_hdr(skb)->tos; 446 ipc.sockc.mark = mark; 447 daddr = ipc.addr = ip_hdr(skb)->saddr; 448 saddr = fib_compute_spec_dst(skb); 449 450 if (icmp_param->replyopts.opt.optlen) { 451 ipc.opt = &icmp_param->replyopts; 452 if (ipc.opt->opt.srr) 453 daddr = icmp_param->replyopts.opt.faddr; 454 } 455 memset(&fl4, 0, sizeof(fl4)); 456 fl4.daddr = daddr; 457 fl4.saddr = saddr; 458 fl4.flowi4_mark = mark; 459 fl4.flowi4_uid = sock_net_uid(net, NULL); 460 fl4.flowi4_dscp = ip4h_dscp(ip_hdr(skb)); 461 fl4.flowi4_proto = IPPROTO_ICMP; 462 fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev); 463 security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); 464 rt = ip_route_output_key(net, &fl4); 465 if (IS_ERR(rt)) 466 goto out_unlock; 467 if (icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit)) 468 icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt); 469 ip_rt_put(rt); 470 out_unlock: 471 icmp_xmit_unlock(sk); 472 out_bh_enable: 473 local_bh_enable(); 474 } 475 476 /* 477 * The device used for looking up which routing table to use for sending an ICMP 478 * error is preferably the source whenever it is set, which should ensure the 479 * icmp error can be sent to the source host, else lookup using the routing 480 * table of the destination device, else use the main routing table (index 0). 481 */ 482 static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb) 483 { 484 struct net_device *dev = skb->dev; 485 const struct dst_entry *dst; 486 487 if (dev) 488 return dev; 489 dst = skb_dst(skb); 490 return dst ? dst_dev(dst) : NULL; 491 } 492 493 static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, 494 struct sk_buff *skb_in, 495 const struct iphdr *iph, __be32 saddr, 496 dscp_t dscp, u32 mark, int type, 497 int code, struct icmp_bxm *param) 498 { 499 struct net_device *route_lookup_dev; 500 struct dst_entry *dst, *dst2; 501 struct rtable *rt, *rt2; 502 struct flowi4 fl4_dec; 503 int err; 504 505 memset(fl4, 0, sizeof(*fl4)); 506 fl4->daddr = (param->replyopts.opt.srr ? 507 param->replyopts.opt.faddr : iph->saddr); 508 fl4->saddr = saddr; 509 fl4->flowi4_mark = mark; 510 fl4->flowi4_uid = sock_net_uid(net, NULL); 511 fl4->flowi4_dscp = dscp; 512 fl4->flowi4_proto = IPPROTO_ICMP; 513 fl4->fl4_icmp_type = type; 514 fl4->fl4_icmp_code = code; 515 route_lookup_dev = icmp_get_route_lookup_dev(skb_in); 516 fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev); 517 518 security_skb_classify_flow(skb_in, flowi4_to_flowi_common(fl4)); 519 rt = ip_route_output_key_hash(net, fl4, skb_in); 520 if (IS_ERR(rt)) 521 return rt; 522 523 /* No need to clone since we're just using its address. */ 524 rt2 = rt; 525 526 dst = xfrm_lookup(net, &rt->dst, 527 flowi4_to_flowi(fl4), NULL, 0); 528 rt = dst_rtable(dst); 529 if (!IS_ERR(dst)) { 530 if (rt != rt2) 531 return rt; 532 if (inet_addr_type_dev_table(net, route_lookup_dev, 533 fl4->daddr) == RTN_LOCAL) 534 return rt; 535 } else if (PTR_ERR(dst) == -EPERM) { 536 rt = NULL; 537 } else { 538 return rt; 539 } 540 err = xfrm_decode_session_reverse(net, skb_in, flowi4_to_flowi(&fl4_dec), AF_INET); 541 if (err) 542 goto relookup_failed; 543 544 if (inet_addr_type_dev_table(net, route_lookup_dev, 545 fl4_dec.saddr) == RTN_LOCAL) { 546 rt2 = __ip_route_output_key(net, &fl4_dec); 547 if (IS_ERR(rt2)) 548 err = PTR_ERR(rt2); 549 } else { 550 struct flowi4 fl4_2 = {}; 551 unsigned long orefdst; 552 553 fl4_2.daddr = fl4_dec.saddr; 554 rt2 = ip_route_output_key(net, &fl4_2); 555 if (IS_ERR(rt2)) { 556 err = PTR_ERR(rt2); 557 goto relookup_failed; 558 } 559 /* Ugh! */ 560 orefdst = skb_dstref_steal(skb_in); 561 err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr, 562 dscp, rt2->dst.dev) ? -EINVAL : 0; 563 564 dst_release(&rt2->dst); 565 rt2 = skb_rtable(skb_in); 566 /* steal dst entry from skb_in, don't drop refcnt */ 567 skb_dstref_steal(skb_in); 568 skb_dstref_restore(skb_in, orefdst); 569 570 /* 571 * At this point, fl4_dec.daddr should NOT be local (we 572 * checked fl4_dec.saddr above). However, a race condition 573 * may occur if the address is added to the interface 574 * concurrently. In that case, ip_route_input() returns a 575 * LOCAL route with dst.output=ip_rt_bug, which must not 576 * be used for output. 577 */ 578 if (!err && rt2 && rt2->rt_type == RTN_LOCAL) { 579 net_warn_ratelimited("detected local route for %pI4 during ICMP sending, src %pI4\n", 580 &fl4_dec.daddr, &fl4_dec.saddr); 581 dst_release(&rt2->dst); 582 err = -EINVAL; 583 } 584 } 585 586 if (err) 587 goto relookup_failed; 588 589 dst2 = xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4_dec), NULL, 590 XFRM_LOOKUP_ICMP); 591 rt2 = dst_rtable(dst2); 592 if (!IS_ERR(dst2)) { 593 dst_release(&rt->dst); 594 memcpy(fl4, &fl4_dec, sizeof(*fl4)); 595 rt = rt2; 596 } else if (PTR_ERR(dst2) == -EPERM) { 597 if (rt) 598 dst_release(&rt->dst); 599 return rt2; 600 } else { 601 err = PTR_ERR(dst2); 602 goto relookup_failed; 603 } 604 return rt; 605 606 relookup_failed: 607 if (rt) 608 return rt; 609 return ERR_PTR(err); 610 } 611 612 struct icmp_ext_iio_addr4_subobj { 613 __be16 afi; 614 __be16 reserved; 615 __be32 addr4; 616 }; 617 618 static unsigned int icmp_ext_iio_len(void) 619 { 620 return sizeof(struct icmp_extobj_hdr) + 621 /* ifIndex */ 622 sizeof(__be32) + 623 /* Interface Address Sub-Object */ 624 sizeof(struct icmp_ext_iio_addr4_subobj) + 625 /* Interface Name Sub-Object. Length must be a multiple of 4 626 * bytes. 627 */ 628 ALIGN(sizeof(struct icmp_ext_iio_name_subobj), 4) + 629 /* MTU */ 630 sizeof(__be32); 631 } 632 633 static unsigned int icmp_ext_max_len(u8 ext_objs) 634 { 635 unsigned int ext_max_len; 636 637 ext_max_len = sizeof(struct icmp_ext_hdr); 638 639 if (ext_objs & BIT(ICMP_ERR_EXT_IIO_IIF)) 640 ext_max_len += icmp_ext_iio_len(); 641 642 return ext_max_len; 643 } 644 645 static __be32 icmp_ext_iio_addr4_find(const struct net_device *dev) 646 { 647 struct in_device *in_dev; 648 struct in_ifaddr *ifa; 649 650 in_dev = __in_dev_get_rcu(dev); 651 if (!in_dev) 652 return 0; 653 654 /* It is unclear from RFC 5837 which IP address should be chosen, but 655 * it makes sense to choose a global unicast address. 656 */ 657 in_dev_for_each_ifa_rcu(ifa, in_dev) { 658 if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY) 659 continue; 660 if (ifa->ifa_scope != RT_SCOPE_UNIVERSE || 661 ipv4_is_multicast(ifa->ifa_address)) 662 continue; 663 return ifa->ifa_address; 664 } 665 666 return 0; 667 } 668 669 static void icmp_ext_iio_iif_append(struct net *net, struct sk_buff *skb, 670 int iif) 671 { 672 struct icmp_ext_iio_name_subobj *name_subobj; 673 struct icmp_extobj_hdr *objh; 674 struct net_device *dev; 675 __be32 data; 676 677 if (!iif) 678 return; 679 680 /* Add the fields in the order specified by RFC 5837. */ 681 objh = skb_put(skb, sizeof(*objh)); 682 objh->class_num = ICMP_EXT_OBJ_CLASS_IIO; 683 objh->class_type = ICMP_EXT_CTYPE_IIO_ROLE(ICMP_EXT_CTYPE_IIO_ROLE_IIF); 684 685 data = htonl(iif); 686 skb_put_data(skb, &data, sizeof(__be32)); 687 objh->class_type |= ICMP_EXT_CTYPE_IIO_IFINDEX; 688 689 rcu_read_lock(); 690 691 dev = dev_get_by_index_rcu(net, iif); 692 if (!dev) 693 goto out; 694 695 data = icmp_ext_iio_addr4_find(dev); 696 if (data) { 697 struct icmp_ext_iio_addr4_subobj *addr4_subobj; 698 699 addr4_subobj = skb_put_zero(skb, sizeof(*addr4_subobj)); 700 addr4_subobj->afi = htons(ICMP_AFI_IP); 701 addr4_subobj->addr4 = data; 702 objh->class_type |= ICMP_EXT_CTYPE_IIO_IPADDR; 703 } 704 705 name_subobj = skb_put_zero(skb, ALIGN(sizeof(*name_subobj), 4)); 706 name_subobj->len = ALIGN(sizeof(*name_subobj), 4); 707 netdev_copy_name(dev, name_subobj->name); 708 objh->class_type |= ICMP_EXT_CTYPE_IIO_NAME; 709 710 data = htonl(READ_ONCE(dev->mtu)); 711 skb_put_data(skb, &data, sizeof(__be32)); 712 objh->class_type |= ICMP_EXT_CTYPE_IIO_MTU; 713 714 out: 715 rcu_read_unlock(); 716 objh->length = htons(skb_tail_pointer(skb) - (unsigned char *)objh); 717 } 718 719 static void icmp_ext_objs_append(struct net *net, struct sk_buff *skb, 720 u8 ext_objs, int iif) 721 { 722 if (ext_objs & BIT(ICMP_ERR_EXT_IIO_IIF)) 723 icmp_ext_iio_iif_append(net, skb, iif); 724 } 725 726 static struct sk_buff * 727 icmp_ext_append(struct net *net, struct sk_buff *skb_in, struct icmphdr *icmph, 728 unsigned int room, int iif) 729 { 730 unsigned int payload_len, ext_max_len, ext_len; 731 struct icmp_ext_hdr *ext_hdr; 732 struct sk_buff *skb; 733 u8 ext_objs; 734 int nhoff; 735 736 switch (icmph->type) { 737 case ICMP_DEST_UNREACH: 738 case ICMP_TIME_EXCEEDED: 739 case ICMP_PARAMETERPROB: 740 break; 741 default: 742 return NULL; 743 } 744 745 ext_objs = READ_ONCE(net->ipv4.sysctl_icmp_errors_extension_mask); 746 if (!ext_objs) 747 return NULL; 748 749 ext_max_len = icmp_ext_max_len(ext_objs); 750 if (ICMP_EXT_ORIG_DGRAM_MIN_LEN + ext_max_len > room) 751 return NULL; 752 753 skb = skb_clone(skb_in, GFP_ATOMIC); 754 if (!skb) 755 return NULL; 756 757 nhoff = skb_network_offset(skb); 758 payload_len = min(skb->len - nhoff, ICMP_EXT_ORIG_DGRAM_MIN_LEN); 759 760 if (!pskb_network_may_pull(skb, payload_len)) 761 goto free_skb; 762 763 if (pskb_trim(skb, nhoff + ICMP_EXT_ORIG_DGRAM_MIN_LEN) || 764 __skb_put_padto(skb, nhoff + ICMP_EXT_ORIG_DGRAM_MIN_LEN, false)) 765 goto free_skb; 766 767 if (pskb_expand_head(skb, 0, ext_max_len, GFP_ATOMIC)) 768 goto free_skb; 769 770 ext_hdr = skb_put_zero(skb, sizeof(*ext_hdr)); 771 ext_hdr->version = ICMP_EXT_VERSION_2; 772 773 icmp_ext_objs_append(net, skb, ext_objs, iif); 774 775 /* Do not send an empty extension structure. */ 776 ext_len = skb_tail_pointer(skb) - (unsigned char *)ext_hdr; 777 if (ext_len == sizeof(*ext_hdr)) 778 goto free_skb; 779 780 ext_hdr->checksum = ip_compute_csum(ext_hdr, ext_len); 781 /* The length of the original datagram in 32-bit words (RFC 4884). */ 782 icmph->un.reserved[1] = ICMP_EXT_ORIG_DGRAM_MIN_LEN / sizeof(u32); 783 784 return skb; 785 786 free_skb: 787 consume_skb(skb); 788 return NULL; 789 } 790 791 /* 792 * Send an ICMP message in response to a situation 793 * 794 * RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header. 795 * MAY send more (we do). 796 * MUST NOT change this header information. 797 * MUST NOT reply to a multicast/broadcast IP address. 798 * MUST NOT reply to a multicast/broadcast MAC address. 799 * MUST reply to only the first fragment. 800 */ 801 802 void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, 803 const struct inet_skb_parm *parm) 804 { 805 DEFINE_RAW_FLEX(struct icmp_bxm, icmp_param, replyopts.opt.__data, 806 IP_OPTIONS_DATA_FIXED_SIZE); 807 struct iphdr *iph; 808 int room; 809 struct rtable *rt = skb_rtable(skb_in); 810 bool apply_ratelimit = false; 811 struct sk_buff *ext_skb; 812 struct ipcm_cookie ipc; 813 struct flowi4 fl4; 814 __be32 saddr; 815 u8 tos; 816 u32 mark; 817 struct net *net; 818 struct sock *sk; 819 820 if (!rt) 821 return; 822 823 rcu_read_lock(); 824 825 if (rt->dst.dev) 826 net = dev_net_rcu(rt->dst.dev); 827 else if (skb_in->dev) 828 net = dev_net_rcu(skb_in->dev); 829 else 830 goto out; 831 832 /* 833 * Find the original header. It is expected to be valid, of course. 834 * Check this, icmp_send is called from the most obscure devices 835 * sometimes. 836 */ 837 iph = ip_hdr(skb_in); 838 839 if ((u8 *)iph < skb_in->head || 840 (skb_network_header(skb_in) + sizeof(*iph)) > 841 skb_tail_pointer(skb_in)) 842 goto out; 843 844 /* 845 * No replies to physical multicast/broadcast 846 */ 847 if (skb_in->pkt_type != PACKET_HOST) 848 goto out; 849 850 /* 851 * Now check at the protocol level 852 */ 853 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) 854 goto out; 855 856 /* 857 * Only reply to fragment 0. We byte re-order the constant 858 * mask for efficiency. 859 */ 860 if (iph->frag_off & htons(IP_OFFSET)) 861 goto out; 862 863 /* 864 * If we send an ICMP error to an ICMP error a mess would result.. 865 */ 866 if (icmp_pointers[type].error) { 867 /* 868 * We are an error, check if we are replying to an 869 * ICMP error 870 */ 871 if (iph->protocol == IPPROTO_ICMP) { 872 u8 _inner_type, *itp; 873 874 itp = skb_header_pointer(skb_in, 875 skb_network_header(skb_in) + 876 (iph->ihl << 2) + 877 offsetof(struct icmphdr, 878 type) - 879 skb_in->data, 880 sizeof(_inner_type), 881 &_inner_type); 882 if (!itp) 883 goto out; 884 885 /* 886 * Assume any unknown ICMP type is an error. This 887 * isn't specified by the RFC, but think about it.. 888 */ 889 if (*itp > NR_ICMP_TYPES || 890 icmp_pointers[*itp].error) 891 goto out; 892 } 893 } 894 895 /* Needed by both icmpv4_global_allow and icmp_xmit_lock */ 896 local_bh_disable(); 897 898 /* Check global sysctl_icmp_msgs_per_sec ratelimit, unless 899 * incoming dev is loopback. If outgoing dev change to not be 900 * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow) 901 */ 902 if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) && 903 !icmpv4_global_allow(net, type, code, &apply_ratelimit)) 904 goto out_bh_enable; 905 906 sk = icmp_xmit_lock(net); 907 if (!sk) 908 goto out_bh_enable; 909 910 /* 911 * Construct source address and options. 912 */ 913 914 saddr = iph->daddr; 915 if (!(rt->rt_flags & RTCF_LOCAL)) { 916 struct net_device *dev = NULL; 917 918 rcu_read_lock(); 919 if (rt_is_input_route(rt) && 920 READ_ONCE(net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)) 921 dev = dev_get_by_index_rcu(net, parm->iif ? parm->iif : 922 inet_iif(skb_in)); 923 924 if (dev) 925 saddr = inet_select_addr(dev, iph->saddr, 926 RT_SCOPE_LINK); 927 else 928 saddr = 0; 929 rcu_read_unlock(); 930 } 931 932 tos = icmp_pointers[type].error ? (RT_TOS(iph->tos) | 933 IPTOS_PREC_INTERNETCONTROL) : 934 iph->tos; 935 mark = IP4_REPLY_MARK(net, skb_in->mark); 936 937 if (__ip_options_echo(net, &icmp_param->replyopts.opt, skb_in, 938 &parm->opt)) 939 goto out_unlock; 940 941 942 /* 943 * Prepare data for ICMP header. 944 */ 945 946 icmp_param->data.icmph.type = type; 947 icmp_param->data.icmph.code = code; 948 icmp_param->data.icmph.un.gateway = info; 949 icmp_param->data.icmph.checksum = 0; 950 icmp_param->skb = skb_in; 951 icmp_param->offset = skb_network_offset(skb_in); 952 ipcm_init(&ipc); 953 ipc.tos = tos; 954 ipc.addr = iph->saddr; 955 ipc.opt = &icmp_param->replyopts; 956 ipc.sockc.mark = mark; 957 958 rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, 959 inet_dsfield_to_dscp(tos), mark, type, code, 960 icmp_param); 961 if (IS_ERR(rt)) 962 goto out_unlock; 963 964 /* peer icmp_ratelimit */ 965 if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit)) 966 goto ende; 967 968 /* RFC says return as much as we can without exceeding 576 bytes. */ 969 970 room = dst4_mtu(&rt->dst); 971 if (room > 576) 972 room = 576; 973 room -= sizeof(struct iphdr) + icmp_param->replyopts.opt.optlen; 974 room -= sizeof(struct icmphdr); 975 /* Guard against tiny mtu. We need to include at least one 976 * IP network header for this message to make any sense. 977 */ 978 if (room <= (int)sizeof(struct iphdr)) 979 goto ende; 980 981 ext_skb = icmp_ext_append(net, skb_in, &icmp_param->data.icmph, room, 982 parm->iif); 983 if (ext_skb) 984 icmp_param->skb = ext_skb; 985 986 icmp_param->data_len = icmp_param->skb->len - icmp_param->offset; 987 if (icmp_param->data_len > room) 988 icmp_param->data_len = room; 989 icmp_param->head_len = sizeof(struct icmphdr); 990 991 /* if we don't have a source address at this point, fall back to the 992 * dummy address instead of sending out a packet with a source address 993 * of 0.0.0.0 994 */ 995 if (!fl4.saddr) 996 fl4.saddr = htonl(INADDR_DUMMY); 997 998 trace_icmp_send(skb_in, type, code); 999 1000 icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt); 1001 1002 if (ext_skb) 1003 consume_skb(ext_skb); 1004 ende: 1005 ip_rt_put(rt); 1006 out_unlock: 1007 icmp_xmit_unlock(sk); 1008 out_bh_enable: 1009 local_bh_enable(); 1010 out: 1011 rcu_read_unlock(); 1012 } 1013 EXPORT_SYMBOL(__icmp_send); 1014 1015 #if IS_ENABLED(CONFIG_NF_NAT) 1016 #include <net/netfilter/nf_conntrack.h> 1017 void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) 1018 { 1019 struct sk_buff *cloned_skb = NULL; 1020 enum ip_conntrack_info ctinfo; 1021 enum ip_conntrack_dir dir; 1022 struct inet_skb_parm parm; 1023 struct nf_conn *ct; 1024 __be32 orig_ip; 1025 1026 memset(&parm, 0, sizeof(parm)); 1027 ct = nf_ct_get(skb_in, &ctinfo); 1028 if (!ct || !(READ_ONCE(ct->status) & IPS_NAT_MASK)) { 1029 __icmp_send(skb_in, type, code, info, &parm); 1030 return; 1031 } 1032 1033 if (skb_shared(skb_in)) 1034 skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); 1035 1036 if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || 1037 (skb_network_header(skb_in) + sizeof(struct iphdr)) > 1038 skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, 1039 skb_network_offset(skb_in) + sizeof(struct iphdr)))) 1040 goto out; 1041 1042 orig_ip = ip_hdr(skb_in)->saddr; 1043 dir = CTINFO2DIR(ctinfo); 1044 ip_hdr(skb_in)->saddr = ct->tuplehash[dir].tuple.src.u3.ip; 1045 __icmp_send(skb_in, type, code, info, &parm); 1046 ip_hdr(skb_in)->saddr = orig_ip; 1047 out: 1048 consume_skb(cloned_skb); 1049 } 1050 EXPORT_SYMBOL(icmp_ndo_send); 1051 #endif 1052 1053 static void icmp_socket_deliver(struct sk_buff *skb, u32 info) 1054 { 1055 const struct iphdr *iph = (const struct iphdr *)skb->data; 1056 const struct net_protocol *ipprot; 1057 int protocol = iph->protocol; 1058 1059 /* Checkin full IP header plus 8 bytes of protocol to 1060 * avoid additional coding at protocol handlers. 1061 */ 1062 if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) 1063 goto out; 1064 1065 /* IPPROTO_RAW sockets are not supposed to receive anything. */ 1066 if (protocol == IPPROTO_RAW) 1067 goto out; 1068 1069 raw_icmp_error(skb, protocol, info); 1070 1071 ipprot = rcu_dereference(inet_protos[protocol]); 1072 if (ipprot && ipprot->err_handler) 1073 ipprot->err_handler(skb, info); 1074 return; 1075 1076 out: 1077 __ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS); 1078 } 1079 1080 static bool icmp_tag_validation(int proto) 1081 { 1082 bool ok; 1083 1084 rcu_read_lock(); 1085 ok = rcu_dereference(inet_protos[proto])->icmp_strict_tag_validation; 1086 rcu_read_unlock(); 1087 return ok; 1088 } 1089 1090 /* 1091 * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and 1092 * ICMP_PARAMETERPROB. 1093 */ 1094 1095 static enum skb_drop_reason icmp_unreach(struct sk_buff *skb) 1096 { 1097 enum skb_drop_reason reason = SKB_NOT_DROPPED_YET; 1098 const struct iphdr *iph; 1099 struct icmphdr *icmph; 1100 struct net *net; 1101 u32 info = 0; 1102 1103 net = skb_dst_dev_net_rcu(skb); 1104 1105 /* 1106 * Incomplete header ? 1107 * Only checks for the IP header, there should be an 1108 * additional check for longer headers in upper levels. 1109 */ 1110 1111 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 1112 goto out_err; 1113 1114 icmph = icmp_hdr(skb); 1115 iph = (const struct iphdr *)skb->data; 1116 1117 if (iph->ihl < 5) { /* Mangled header, drop. */ 1118 reason = SKB_DROP_REASON_IP_INHDR; 1119 goto out_err; 1120 } 1121 1122 switch (icmph->type) { 1123 case ICMP_DEST_UNREACH: 1124 switch (icmph->code & 15) { 1125 case ICMP_NET_UNREACH: 1126 case ICMP_HOST_UNREACH: 1127 case ICMP_PROT_UNREACH: 1128 case ICMP_PORT_UNREACH: 1129 break; 1130 case ICMP_FRAG_NEEDED: 1131 /* for documentation of the ip_no_pmtu_disc 1132 * values please see 1133 * Documentation/networking/ip-sysctl.rst 1134 */ 1135 switch (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) { 1136 default: 1137 net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n", 1138 &iph->daddr); 1139 break; 1140 case 2: 1141 goto out; 1142 case 3: 1143 if (!icmp_tag_validation(iph->protocol)) 1144 goto out; 1145 fallthrough; 1146 case 0: 1147 info = ntohs(icmph->un.frag.mtu); 1148 } 1149 break; 1150 case ICMP_SR_FAILED: 1151 net_dbg_ratelimited("%pI4: Source Route Failed\n", 1152 &iph->daddr); 1153 break; 1154 default: 1155 break; 1156 } 1157 if (icmph->code > NR_ICMP_UNREACH) 1158 goto out; 1159 break; 1160 case ICMP_PARAMETERPROB: 1161 info = ntohl(icmph->un.gateway) >> 24; 1162 break; 1163 case ICMP_TIME_EXCEEDED: 1164 __ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS); 1165 if (icmph->code == ICMP_EXC_FRAGTIME) 1166 goto out; 1167 break; 1168 } 1169 1170 /* 1171 * Throw it at our lower layers 1172 * 1173 * RFC 1122: 3.2.2 MUST extract the protocol ID from the passed 1174 * header. 1175 * RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the 1176 * transport layer. 1177 * RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to 1178 * transport layer. 1179 */ 1180 1181 /* 1182 * Check the other end isn't violating RFC 1122. Some routers send 1183 * bogus responses to broadcast frames. If you see this message 1184 * first check your netmask matches at both ends, if it does then 1185 * get the other vendor to fix their kit. 1186 */ 1187 1188 if (!READ_ONCE(net->ipv4.sysctl_icmp_ignore_bogus_error_responses) && 1189 inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) { 1190 net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n", 1191 &ip_hdr(skb)->saddr, 1192 icmph->type, icmph->code, 1193 &iph->daddr, skb->dev->name); 1194 goto out; 1195 } 1196 1197 icmp_socket_deliver(skb, info); 1198 1199 out: 1200 return reason; 1201 out_err: 1202 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); 1203 return reason ?: SKB_DROP_REASON_NOT_SPECIFIED; 1204 } 1205 1206 1207 /* 1208 * Handle ICMP_REDIRECT. 1209 */ 1210 1211 static enum skb_drop_reason icmp_redirect(struct sk_buff *skb) 1212 { 1213 if (skb->len < sizeof(struct iphdr)) { 1214 __ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS); 1215 return SKB_DROP_REASON_PKT_TOO_SMALL; 1216 } 1217 1218 if (!pskb_may_pull(skb, sizeof(struct iphdr))) { 1219 /* there aught to be a stat */ 1220 return SKB_DROP_REASON_NOMEM; 1221 } 1222 1223 icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway)); 1224 return SKB_NOT_DROPPED_YET; 1225 } 1226 1227 /* 1228 * Handle ICMP_ECHO ("ping") and ICMP_EXT_ECHO ("PROBE") requests. 1229 * 1230 * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo 1231 * requests. 1232 * RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be 1233 * included in the reply. 1234 * RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring 1235 * echo requests, MUST have default=NOT. 1236 * RFC 8335: 8 MUST have a config option to enable/disable ICMP 1237 * Extended Echo Functionality, MUST be disabled by default 1238 * See also WRT handling of options once they are done and working. 1239 */ 1240 1241 static enum skb_drop_reason icmp_echo(struct sk_buff *skb) 1242 { 1243 DEFINE_RAW_FLEX(struct icmp_bxm, icmp_param, replyopts.opt.__data, 1244 IP_OPTIONS_DATA_FIXED_SIZE); 1245 struct net *net; 1246 1247 net = skb_dst_dev_net_rcu(skb); 1248 /* should there be an ICMP stat for ignored echos? */ 1249 if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all)) 1250 return SKB_NOT_DROPPED_YET; 1251 1252 icmp_param->data.icmph = *icmp_hdr(skb); 1253 icmp_param->skb = skb; 1254 icmp_param->offset = 0; 1255 icmp_param->data_len = skb->len; 1256 icmp_param->head_len = sizeof(struct icmphdr); 1257 1258 if (icmp_param->data.icmph.type == ICMP_ECHO) 1259 icmp_param->data.icmph.type = ICMP_ECHOREPLY; 1260 else if (!icmp_build_probe(skb, &icmp_param->data.icmph)) 1261 return SKB_NOT_DROPPED_YET; 1262 1263 icmp_reply(icmp_param, skb); 1264 return SKB_NOT_DROPPED_YET; 1265 } 1266 1267 /* Helper for icmp_echo and icmpv6_echo_reply. 1268 * Searches for net_device that matches PROBE interface identifier 1269 * and builds PROBE reply message in icmphdr. 1270 * 1271 * Returns false if PROBE responses are disabled via sysctl 1272 */ 1273 1274 bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr) 1275 { 1276 struct net *net = dev_net_rcu(skb->dev); 1277 struct icmp_ext_hdr *ext_hdr, _ext_hdr; 1278 struct icmp_ext_echo_iio *iio, _iio; 1279 struct inet6_dev *in6_dev; 1280 struct in_device *in_dev; 1281 struct net_device *dev; 1282 char buff[IFNAMSIZ]; 1283 u16 ident_len; 1284 u8 status; 1285 1286 if (!READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe)) 1287 return false; 1288 1289 /* We currently only support probing interfaces on the proxy node 1290 * Check to ensure L-bit is set 1291 */ 1292 if (!(ntohs(icmphdr->un.echo.sequence) & 1)) 1293 return false; 1294 /* Clear status bits in reply message */ 1295 icmphdr->un.echo.sequence &= htons(0xFF00); 1296 if (icmphdr->type == ICMP_EXT_ECHO) 1297 icmphdr->type = ICMP_EXT_ECHOREPLY; 1298 else 1299 icmphdr->type = ICMPV6_EXT_ECHO_REPLY; 1300 ext_hdr = skb_header_pointer(skb, 0, sizeof(_ext_hdr), &_ext_hdr); 1301 /* Size of iio is class_type dependent. 1302 * Only check header here and assign length based on ctype in the switch statement 1303 */ 1304 iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr), &_iio); 1305 if (!ext_hdr || !iio) 1306 goto send_mal_query; 1307 if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr) || 1308 ntohs(iio->extobj_hdr.length) > sizeof(_iio)) 1309 goto send_mal_query; 1310 ident_len = ntohs(iio->extobj_hdr.length) - sizeof(iio->extobj_hdr); 1311 iio = skb_header_pointer(skb, sizeof(_ext_hdr), 1312 sizeof(iio->extobj_hdr) + ident_len, &_iio); 1313 if (!iio) 1314 goto send_mal_query; 1315 1316 status = 0; 1317 dev = NULL; 1318 switch (iio->extobj_hdr.class_type) { 1319 case ICMP_EXT_ECHO_CTYPE_NAME: 1320 if (ident_len >= IFNAMSIZ) 1321 goto send_mal_query; 1322 memset(buff, 0, sizeof(buff)); 1323 memcpy(buff, &iio->ident.name, ident_len); 1324 dev = dev_get_by_name(net, buff); 1325 break; 1326 case ICMP_EXT_ECHO_CTYPE_INDEX: 1327 if (ident_len != sizeof(iio->ident.ifindex)) 1328 goto send_mal_query; 1329 dev = dev_get_by_index(net, ntohl(iio->ident.ifindex)); 1330 break; 1331 case ICMP_EXT_ECHO_CTYPE_ADDR: 1332 if (ident_len < sizeof(iio->ident.addr.ctype3_hdr) || 1333 ident_len != sizeof(iio->ident.addr.ctype3_hdr) + 1334 iio->ident.addr.ctype3_hdr.addrlen) 1335 goto send_mal_query; 1336 switch (ntohs(iio->ident.addr.ctype3_hdr.afi)) { 1337 case ICMP_AFI_IP: 1338 if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in_addr)) 1339 goto send_mal_query; 1340 dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr); 1341 break; 1342 #if IS_ENABLED(CONFIG_IPV6) 1343 case ICMP_AFI_IP6: 1344 if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in6_addr)) 1345 goto send_mal_query; 1346 dev = ipv6_stub->ipv6_dev_find(net, &iio->ident.addr.ip_addr.ipv6_addr, dev); 1347 dev_hold(dev); 1348 break; 1349 #endif 1350 default: 1351 goto send_mal_query; 1352 } 1353 break; 1354 default: 1355 goto send_mal_query; 1356 } 1357 if (!dev) { 1358 icmphdr->code = ICMP_EXT_CODE_NO_IF; 1359 return true; 1360 } 1361 /* Fill bits in reply message */ 1362 if (dev->flags & IFF_UP) 1363 status |= ICMP_EXT_ECHOREPLY_ACTIVE; 1364 1365 in_dev = __in_dev_get_rcu(dev); 1366 if (in_dev && rcu_access_pointer(in_dev->ifa_list)) 1367 status |= ICMP_EXT_ECHOREPLY_IPV4; 1368 1369 in6_dev = __in6_dev_get(dev); 1370 if (in6_dev && !list_empty(&in6_dev->addr_list)) 1371 status |= ICMP_EXT_ECHOREPLY_IPV6; 1372 1373 dev_put(dev); 1374 icmphdr->un.echo.sequence |= htons(status); 1375 return true; 1376 send_mal_query: 1377 icmphdr->code = ICMP_EXT_CODE_MAL_QUERY; 1378 return true; 1379 } 1380 EXPORT_SYMBOL_GPL(icmp_build_probe); 1381 1382 /* 1383 * Handle ICMP Timestamp requests. 1384 * RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests. 1385 * SHOULD be in the kernel for minimum random latency. 1386 * MUST be accurate to a few minutes. 1387 * MUST be updated at least at 15Hz. 1388 */ 1389 static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb) 1390 { 1391 DEFINE_RAW_FLEX(struct icmp_bxm, icmp_param, replyopts.opt.__data, 1392 IP_OPTIONS_DATA_FIXED_SIZE); 1393 /* 1394 * Too short. 1395 */ 1396 if (skb->len < 4) 1397 goto out_err; 1398 1399 /* 1400 * Fill in the current time as ms since midnight UT: 1401 */ 1402 icmp_param->data.times[1] = inet_current_timestamp(); 1403 icmp_param->data.times[2] = icmp_param->data.times[1]; 1404 1405 BUG_ON(skb_copy_bits(skb, 0, &icmp_param->data.times[0], 4)); 1406 1407 icmp_param->data.icmph = *icmp_hdr(skb); 1408 icmp_param->data.icmph.type = ICMP_TIMESTAMPREPLY; 1409 icmp_param->data.icmph.code = 0; 1410 icmp_param->skb = skb; 1411 icmp_param->offset = 0; 1412 icmp_param->data_len = 0; 1413 icmp_param->head_len = sizeof(struct icmphdr) + 12; 1414 icmp_reply(icmp_param, skb); 1415 return SKB_NOT_DROPPED_YET; 1416 1417 out_err: 1418 __ICMP_INC_STATS(skb_dst_dev_net_rcu(skb), ICMP_MIB_INERRORS); 1419 return SKB_DROP_REASON_PKT_TOO_SMALL; 1420 } 1421 1422 static enum skb_drop_reason icmp_discard(struct sk_buff *skb) 1423 { 1424 /* pretend it was a success */ 1425 return SKB_NOT_DROPPED_YET; 1426 } 1427 1428 /* 1429 * Deal with incoming ICMP packets. 1430 */ 1431 int icmp_rcv(struct sk_buff *skb) 1432 { 1433 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; 1434 struct rtable *rt = skb_rtable(skb); 1435 struct net *net = dev_net_rcu(rt->dst.dev); 1436 struct icmphdr *icmph; 1437 1438 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1439 struct sec_path *sp = skb_sec_path(skb); 1440 int nh; 1441 1442 if (!(sp && sp->xvec[sp->len - 1]->props.flags & 1443 XFRM_STATE_ICMP)) { 1444 reason = SKB_DROP_REASON_XFRM_POLICY; 1445 goto drop; 1446 } 1447 1448 if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr))) 1449 goto drop; 1450 1451 nh = skb_network_offset(skb); 1452 skb_set_network_header(skb, sizeof(*icmph)); 1453 1454 if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, 1455 skb)) { 1456 reason = SKB_DROP_REASON_XFRM_POLICY; 1457 goto drop; 1458 } 1459 1460 skb_set_network_header(skb, nh); 1461 } 1462 1463 __ICMP_INC_STATS(net, ICMP_MIB_INMSGS); 1464 1465 if (skb_checksum_simple_validate(skb)) 1466 goto csum_error; 1467 1468 if (!pskb_pull(skb, sizeof(*icmph))) 1469 goto error; 1470 1471 icmph = icmp_hdr(skb); 1472 1473 ICMPMSGIN_INC_STATS(net, icmph->type); 1474 1475 /* Check for ICMP Extended Echo (PROBE) messages */ 1476 if (icmph->type == ICMP_EXT_ECHO) { 1477 /* We can't use icmp_pointers[].handler() because it is an array of 1478 * size NR_ICMP_TYPES + 1 (19 elements) and PROBE has code 42. 1479 */ 1480 reason = icmp_echo(skb); 1481 goto reason_check; 1482 } 1483 1484 /* 1485 * Parse the ICMP message 1486 */ 1487 1488 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 1489 /* 1490 * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be 1491 * silently ignored (we let user decide with a sysctl). 1492 * RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently 1493 * discarded if to broadcast/multicast. 1494 */ 1495 if ((icmph->type == ICMP_ECHO || 1496 icmph->type == ICMP_TIMESTAMP) && 1497 READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_broadcasts)) { 1498 reason = SKB_DROP_REASON_INVALID_PROTO; 1499 goto error; 1500 } 1501 if (icmph->type != ICMP_ECHO && 1502 icmph->type != ICMP_TIMESTAMP && 1503 icmph->type != ICMP_ADDRESS && 1504 icmph->type != ICMP_ADDRESSREPLY) { 1505 reason = SKB_DROP_REASON_INVALID_PROTO; 1506 goto error; 1507 } 1508 } 1509 1510 if (icmph->type == ICMP_EXT_ECHOREPLY || 1511 icmph->type == ICMP_ECHOREPLY) { 1512 reason = ping_rcv(skb); 1513 return reason ? NET_RX_DROP : NET_RX_SUCCESS; 1514 } 1515 1516 /* 1517 * 18 is the highest 'known' ICMP type. Anything else is a mystery 1518 * 1519 * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently 1520 * discarded. 1521 */ 1522 if (icmph->type > NR_ICMP_TYPES) { 1523 reason = SKB_DROP_REASON_UNHANDLED_PROTO; 1524 goto error; 1525 } 1526 1527 reason = icmp_pointers[icmph->type].handler(skb); 1528 reason_check: 1529 if (!reason) { 1530 consume_skb(skb); 1531 return NET_RX_SUCCESS; 1532 } 1533 1534 drop: 1535 kfree_skb_reason(skb, reason); 1536 return NET_RX_DROP; 1537 csum_error: 1538 reason = SKB_DROP_REASON_ICMP_CSUM; 1539 __ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS); 1540 error: 1541 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); 1542 goto drop; 1543 } 1544 1545 static bool ip_icmp_error_rfc4884_validate(const struct sk_buff *skb, int off) 1546 { 1547 struct icmp_extobj_hdr *objh, _objh; 1548 struct icmp_ext_hdr *exth, _exth; 1549 u16 olen; 1550 1551 exth = skb_header_pointer(skb, off, sizeof(_exth), &_exth); 1552 if (!exth) 1553 return false; 1554 if (exth->version != 2) 1555 return true; 1556 1557 if (exth->checksum && 1558 csum_fold(skb_checksum(skb, off, skb->len - off, 0))) 1559 return false; 1560 1561 off += sizeof(_exth); 1562 while (off < skb->len) { 1563 objh = skb_header_pointer(skb, off, sizeof(_objh), &_objh); 1564 if (!objh) 1565 return false; 1566 1567 olen = ntohs(objh->length); 1568 if (olen < sizeof(_objh)) 1569 return false; 1570 1571 off += olen; 1572 if (off > skb->len) 1573 return false; 1574 } 1575 1576 return true; 1577 } 1578 1579 void ip_icmp_error_rfc4884(const struct sk_buff *skb, 1580 struct sock_ee_data_rfc4884 *out, 1581 int thlen, int off) 1582 { 1583 int hlen; 1584 1585 /* original datagram headers: end of icmph to payload (skb->data) */ 1586 hlen = -skb_transport_offset(skb) - thlen; 1587 1588 /* per rfc 4884: minimal datagram length of 128 bytes */ 1589 if (off < 128 || off < hlen) 1590 return; 1591 1592 /* kernel has stripped headers: return payload offset in bytes */ 1593 off -= hlen; 1594 if (off + sizeof(struct icmp_ext_hdr) > skb->len) 1595 return; 1596 1597 out->len = off; 1598 1599 if (!ip_icmp_error_rfc4884_validate(skb, off)) 1600 out->flags |= SO_EE_RFC4884_FLAG_INVALID; 1601 } 1602 EXPORT_SYMBOL_GPL(ip_icmp_error_rfc4884); 1603 1604 int icmp_err(struct sk_buff *skb, u32 info) 1605 { 1606 struct iphdr *iph = (struct iphdr *)skb->data; 1607 int offset = iph->ihl<<2; 1608 struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset); 1609 struct net *net = dev_net_rcu(skb->dev); 1610 int type = icmp_hdr(skb)->type; 1611 int code = icmp_hdr(skb)->code; 1612 1613 /* 1614 * Use ping_err to handle all icmp errors except those 1615 * triggered by ICMP_ECHOREPLY which sent from kernel. 1616 */ 1617 if (icmph->type != ICMP_ECHOREPLY) { 1618 ping_err(skb, offset, info); 1619 return 0; 1620 } 1621 1622 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) 1623 ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP); 1624 else if (type == ICMP_REDIRECT) 1625 ipv4_redirect(skb, net, 0, IPPROTO_ICMP); 1626 1627 return 0; 1628 } 1629 1630 /* 1631 * This table is the definition of how we handle ICMP. 1632 */ 1633 static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = { 1634 [ICMP_ECHOREPLY] = { 1635 .handler = ping_rcv, 1636 }, 1637 [1] = { 1638 .handler = icmp_discard, 1639 .error = 1, 1640 }, 1641 [2] = { 1642 .handler = icmp_discard, 1643 .error = 1, 1644 }, 1645 [ICMP_DEST_UNREACH] = { 1646 .handler = icmp_unreach, 1647 .error = 1, 1648 }, 1649 [ICMP_SOURCE_QUENCH] = { 1650 .handler = icmp_unreach, 1651 .error = 1, 1652 }, 1653 [ICMP_REDIRECT] = { 1654 .handler = icmp_redirect, 1655 .error = 1, 1656 }, 1657 [6] = { 1658 .handler = icmp_discard, 1659 .error = 1, 1660 }, 1661 [7] = { 1662 .handler = icmp_discard, 1663 .error = 1, 1664 }, 1665 [ICMP_ECHO] = { 1666 .handler = icmp_echo, 1667 }, 1668 [9] = { 1669 .handler = icmp_discard, 1670 .error = 1, 1671 }, 1672 [10] = { 1673 .handler = icmp_discard, 1674 .error = 1, 1675 }, 1676 [ICMP_TIME_EXCEEDED] = { 1677 .handler = icmp_unreach, 1678 .error = 1, 1679 }, 1680 [ICMP_PARAMETERPROB] = { 1681 .handler = icmp_unreach, 1682 .error = 1, 1683 }, 1684 [ICMP_TIMESTAMP] = { 1685 .handler = icmp_timestamp, 1686 }, 1687 [ICMP_TIMESTAMPREPLY] = { 1688 .handler = icmp_discard, 1689 }, 1690 [ICMP_INFO_REQUEST] = { 1691 .handler = icmp_discard, 1692 }, 1693 [ICMP_INFO_REPLY] = { 1694 .handler = icmp_discard, 1695 }, 1696 [ICMP_ADDRESS] = { 1697 .handler = icmp_discard, 1698 }, 1699 [ICMP_ADDRESSREPLY] = { 1700 .handler = icmp_discard, 1701 }, 1702 }; 1703 1704 static int __net_init icmp_sk_init(struct net *net) 1705 { 1706 /* Control parameters for ECHO replies. */ 1707 net->ipv4.sysctl_icmp_echo_ignore_all = 0; 1708 net->ipv4.sysctl_icmp_echo_enable_probe = 0; 1709 net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1; 1710 1711 /* Control parameter - ignore bogus broadcast responses? */ 1712 net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1; 1713 1714 /* 1715 * Configurable global rate limit. 1716 * 1717 * ratelimit defines tokens/packet consumed for dst->rate_token 1718 * bucket ratemask defines which icmp types are ratelimited by 1719 * setting it's bit position. 1720 * 1721 * default: 1722 * dest unreachable (3), source quench (4), 1723 * time exceeded (11), parameter problem (12) 1724 */ 1725 1726 net->ipv4.sysctl_icmp_ratelimit = 1 * HZ; 1727 net->ipv4.sysctl_icmp_ratemask = 0x1818; 1728 net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0; 1729 net->ipv4.sysctl_icmp_errors_extension_mask = 0; 1730 net->ipv4.sysctl_icmp_msgs_per_sec = 1000; 1731 net->ipv4.sysctl_icmp_msgs_burst = 50; 1732 1733 return 0; 1734 } 1735 1736 static struct pernet_operations __net_initdata icmp_sk_ops = { 1737 .init = icmp_sk_init, 1738 }; 1739 1740 int __init icmp_init(void) 1741 { 1742 int err, i; 1743 1744 for_each_possible_cpu(i) { 1745 struct sock *sk; 1746 1747 err = inet_ctl_sock_create(&sk, PF_INET, 1748 SOCK_RAW, IPPROTO_ICMP, &init_net); 1749 if (err < 0) 1750 return err; 1751 1752 per_cpu(ipv4_icmp_sk, i) = sk; 1753 1754 /* Enough space for 2 64K ICMP packets, including 1755 * sk_buff/skb_shared_info struct overhead. 1756 */ 1757 sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024); 1758 1759 /* 1760 * Speedup sock_wfree() 1761 */ 1762 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 1763 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT; 1764 } 1765 return register_pernet_subsys(&icmp_sk_ops); 1766 } 1767