// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3:	Implementation of the ICMP protocol layer.
 *
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *	Some of the function names and the icmp unreach table for this
 *	module were derived from [icmp.c 1.0.11 06/02/93] by
 *	Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
 *	Other than that this module is a complete rewrite.
 *
 *	Fixes:
 *	Clemens Fruhwirth	:	introduce global icmp rate limiting
 *					with icmp type masking ability instead
 *					of broken per type icmp timeouts.
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Multicast ping reply as self.
 *		Alan Cox	:	Fix atomicity lockup in ip_build_xmit
 *					call.
 *		Alan Cox	:	Added 216,128 byte paths to the MTU
 *					code.
 *		Martin Mares	:	RFC1812 checks.
 *		Martin Mares	:	Can be configured to follow redirects
 *					if acting as a router _without_ a
 *					routing protocol (RFC 1812).
 *		Martin Mares	:	Echo requests may be configured to
 *					be ignored (RFC 1812).
 *		Martin Mares	:	Limitation of ICMP error message
 *					transmit rate (RFC 1812).
 *		Martin Mares	:	TOS and Precedence set correctly
 *					(RFC 1812).
 *		Martin Mares	:	Now copying as much data from the
 *					original packet as we can without
 *					exceeding 576 bytes (RFC 1812).
 *		Willy Konynenberg	:	Transparent proxying support.
 *		Keith Owens	:	RFC1191 correction for 4.2BSD based
 *					path MTU bug.
 *		Thomas Quinot	:	ICMP Dest Unreach codes up to 15 are
 *					valid (RFC 1812).
 *		Andi Kleen	:	Check all packet lengths properly
 *					and moved all kfree_skb() up to
 *					icmp_rcv.
 *		Andi Kleen	:	Move the rate limit bookkeeping
 *					into the dest entry and use a token
 *					bucket filter (thanks to ANK). Make
 *					the rates sysctl configurable.
 *		Yu Tianli	:	Fixed two ugly bugs in icmp_send
 *					- IP option length was accounted wrongly
 *					- ICMP header length was not accounted
 *					  at all.
 *		Tristan Greaves	:	Added sysctl option to ignore bogus
 *					broadcast responses from broken routers.
 *
 * To Fix:
 *
 *	- Should use skb_pull() instead of all the manual checking.
 *	  This would also greatly simplify some upper layer error handlers. --AK
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
#include <net/ip_fib.h>
#include <net/l3mdev.h>

/*
 *	Build xmit assembly blocks
 */

struct icmp_bxm {
	struct sk_buff *skb;
	int offset;
	int data_len;

	struct {
		struct icmphdr icmph;
		__be32 times[3];
	} data;
	int head_len;
	struct ip_options_data replyopts;
};

/*
 *	An array of errno for error messages from dest unreach.
 *	RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED
 *	MUST be considered 'transient errors'.
 */
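/*
 * The table is indexed by the ICMP_DEST_UNREACH code (0 .. NR_ICMP_UNREACH).
 * Callers use 'fatal' to decide whether to report a hard error or only a
 * transient (soft) one to the affected socket.
 */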
const struct icmp_err icmp_err_convert[] = {
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNREACH */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNREACH */
		.fatal = 0,
	},
	{
		.errno = ENOPROTOOPT /* ICMP_PROT_UNREACH */,
		.fatal = 1,
	},
	{
		.errno = ECONNREFUSED,	/* ICMP_PORT_UNREACH */
		.fatal = 1,
	},
	{
		.errno = EMSGSIZE,	/* ICMP_FRAG_NEEDED */
		.fatal = 0,
	},
	{
		.errno = EOPNOTSUPP,	/* ICMP_SR_FAILED */
		.fatal = 0,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNKNOWN */
		.fatal = 1,
	},
	{
		.errno = EHOSTDOWN,	/* ICMP_HOST_UNKNOWN */
		.fatal = 1,
	},
	{
		.errno = ENONET,	/* ICMP_HOST_ISOLATED */
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_ANO */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_ANO */
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNR_TOS */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNR_TOS */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PKT_FILTERED */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PREC_VIOLATION */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PREC_CUTOFF */
		.fatal = 1,
	},
};
EXPORT_SYMBOL(icmp_err_convert);

/*
 *	ICMP control array. This specifies what to do with each ICMP.
 */

struct icmp_control {
	bool (*handler)(struct sk_buff *skb);
	short   error;		/* This ICMP is classed as an error message */
};

static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];

/*
 *	The ICMP socket(s). This is the most convenient way to flow control
 *	our ICMP output as well as maintain a clean interface throughout
 *	all layers. All Socketless IP sends will soon be gone.
 *
 *	On SMP we have one ICMP socket per-cpu.
 */
static struct sock *icmp_sk(struct net *net)
{
	return this_cpu_read(*net->ipv4.icmp_sk);
}

/* Called with BH disabled */
static inline struct sock *icmp_xmit_lock(struct net *net)
{
	struct sock *sk;

	sk = icmp_sk(net);

	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path signals a
		 * dst_link_failure() for an outgoing ICMP packet.
		 */
		return NULL;
	}
	return sk;
}

static inline void icmp_xmit_unlock(struct sock *sk)
{
	spin_unlock(&sk->sk_lock.slock);
}

int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
int sysctl_icmp_msgs_burst __read_mostly = 50;
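/*
 * State for the global ICMP output rate limit: 'credit' is the number of
 * messages we may still send, 'stamp' (jiffies) is when the bucket was
 * last refilled.  icmp_global_allow() below refills it with
 * sysctl_icmp_msgs_per_sec * delta / HZ tokens (delta capped at one
 * second) and clamps the total to sysctl_icmp_msgs_burst.
 */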
static struct {
	spinlock_t	lock;
	u32		credit;
	u32		stamp;
} icmp_global = {
	.lock	= __SPIN_LOCK_UNLOCKED(icmp_global.lock),
};

/**
 * icmp_global_allow - Are we allowed to send one more ICMP message ?
 *
 * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
 * Returns false if we reached the limit and can not send another packet.
 * Note: called with BH disabled
 */
bool icmp_global_allow(void)
{
	u32 credit, delta, incr = 0, now = (u32)jiffies;
	bool rc = false;

	/* Check if token bucket is empty and cannot be refilled
	 * without taking the spinlock.
	 */
	if (!icmp_global.credit) {
		delta = min_t(u32, now - icmp_global.stamp, HZ);
		if (delta < HZ / 50)
			return false;
	}

	spin_lock(&icmp_global.lock);
	delta = min_t(u32, now - icmp_global.stamp, HZ);
	if (delta >= HZ / 50) {
		incr = sysctl_icmp_msgs_per_sec * delta / HZ;
		if (incr)
			icmp_global.stamp = now;
	}
	credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
	if (credit) {
		credit--;
		rc = true;
	}
	icmp_global.credit = credit;
	spin_unlock(&icmp_global.lock);
	return rc;
}
EXPORT_SYMBOL(icmp_global_allow);

static bool icmpv4_mask_allow(struct net *net, int type, int code)
{
	if (type > NR_ICMP_TYPES)
		return true;

	/* Don't limit PMTU discovery. */
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		return true;

	/* Limit if icmp type is enabled in ratemask. */
	if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask))
		return true;

	return false;
}

static bool icmpv4_global_allow(struct net *net, int type, int code)
{
	if (icmpv4_mask_allow(net, type, code))
		return true;

	if (icmp_global_allow())
		return true;

	return false;
}

/*
 *	Send an ICMP frame.
 */

static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
			       struct flowi4 *fl4, int type, int code)
{
	struct dst_entry *dst = &rt->dst;
	struct inet_peer *peer;
	bool rc = true;
	int vif;

	if (icmpv4_mask_allow(net, type, code))
		goto out;

	/* No rate limit on loopback */
	if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
		goto out;

	vif = l3mdev_master_ifindex(dst->dev);
	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
	rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit);
	if (peer)
		inet_putpeer(peer);
out:
	return rc;
}

/*
 *	Maintain the counters used in the SNMP statistics for outgoing ICMP
 */
void icmp_out_count(struct net *net, unsigned char type)
{
	ICMPMSGOUT_INC_STATS(net, type);
	ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
}
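/*
 * icmp_glue_bits() is the getfrag callback handed to ip_append_data():
 * it copies a chunk of the offending packet into the reply and keeps a
 * running checksum per fragment.  icmp_push_reply() then sums the
 * per-skb checksums, adds the ICMP header held in icmp_bxm and folds
 * the result into icmph->checksum before pushing the frames out.
 */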
/*
 *	Checksum each fragment, and on the first include the headers and final
 *	checksum.
 */
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
			  struct sk_buff *skb)
{
	struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
	__wsum csum;

	csum = skb_copy_and_csum_bits(icmp_param->skb,
				      icmp_param->offset + offset,
				      to, len, 0);

	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (icmp_pointers[icmp_param->data.icmph.type].error)
		nf_ct_attach(skb, icmp_param->skb);
	return 0;
}

static void icmp_push_reply(struct icmp_bxm *icmp_param,
			    struct flowi4 *fl4,
			    struct ipcm_cookie *ipc, struct rtable **rt)
{
	struct sock *sk;
	struct sk_buff *skb;

	sk = icmp_sk(dev_net((*rt)->dst.dev));
	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
			   icmp_param->data_len+icmp_param->head_len,
			   icmp_param->head_len,
			   ipc, rt, MSG_DONTWAIT) < 0) {
		__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
		ip_flush_pending_frames(sk);
	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		struct icmphdr *icmph = icmp_hdr(skb);
		__wsum csum = 0;
		struct sk_buff *skb1;

		skb_queue_walk(&sk->sk_write_queue, skb1) {
			csum = csum_add(csum, skb1->csum);
		}
		csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
						 (char *)icmph,
						 icmp_param->head_len, csum);
		icmph->checksum = csum_fold(csum);
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, fl4);
	}
}

/*
 *	Driving logic for building and sending ICMP messages.
 */

static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
	struct ipcm_cookie ipc;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct flowi4 fl4;
	struct sock *sk;
	struct inet_sock *inet;
	__be32 daddr, saddr;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);
	int type = icmp_param->data.icmph.type;
	int code = icmp_param->data.icmph.code;

	if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb))
		return;

	/* Needed by both icmp_global_allow and icmp_xmit_lock */
	local_bh_disable();

	/* global icmp_msgs_per_sec */
	if (!icmpv4_global_allow(net, type, code))
		goto out_bh_enable;

	sk = icmp_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;
	inet = inet_sk(sk);

	icmp_param->data.icmph.checksum = 0;

	ipcm_init(&ipc);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_mark = mark;
	daddr = ipc.addr = ip_hdr(skb)->saddr;
	saddr = fib_compute_spec_dst(skb);

	if (icmp_param->replyopts.opt.opt.optlen) {
		ipc.opt = &icmp_param->replyopts.opt;
		if (ipc.opt->opt.srr)
			daddr = icmp_param->replyopts.opt.opt.faddr;
	}
	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = sock_net_uid(net, NULL);
	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
	fl4.flowi4_proto = IPPROTO_ICMP;
	fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		goto out_unlock;
	if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
		icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
}
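/*
 * Route the ICMP error: start with a plain output route for the reply
 * flow; if xfrm_lookup() rejects that with -EPERM, decode the flow of
 * the offending packet in reverse (xfrm_decode_session_reverse) and
 * retry, so the error can still travel back through the IPsec policies
 * that cover the original traffic.
 */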
static struct rtable *icmp_route_lookup(struct net *net,
					struct flowi4 *fl4,
					struct sk_buff *skb_in,
					const struct iphdr *iph,
					__be32 saddr, u8 tos, u32 mark,
					int type, int code,
					struct icmp_bxm *param)
{
	struct rtable *rt, *rt2;
	struct flowi4 fl4_dec;
	int err;

	memset(fl4, 0, sizeof(*fl4));
	fl4->daddr = (param->replyopts.opt.opt.srr ?
		      param->replyopts.opt.opt.faddr : iph->saddr);
	fl4->saddr = saddr;
	fl4->flowi4_mark = mark;
	fl4->flowi4_uid = sock_net_uid(net, NULL);
	fl4->flowi4_tos = RT_TOS(tos);
	fl4->flowi4_proto = IPPROTO_ICMP;
	fl4->fl4_icmp_type = type;
	fl4->fl4_icmp_code = code;
	fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);

	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
	rt = ip_route_output_key_hash(net, fl4, skb_in);
	if (IS_ERR(rt))
		return rt;

	/* No need to clone since we're just using its address. */
	rt2 = rt;

	rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
					   flowi4_to_flowi(fl4), NULL, 0);
	if (!IS_ERR(rt)) {
		if (rt != rt2)
			return rt;
	} else if (PTR_ERR(rt) == -EPERM) {
		rt = NULL;
	} else
		return rt;

	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
	if (err)
		goto relookup_failed;

	if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
				     fl4_dec.saddr) == RTN_LOCAL) {
		rt2 = __ip_route_output_key(net, &fl4_dec);
		if (IS_ERR(rt2))
			err = PTR_ERR(rt2);
	} else {
		struct flowi4 fl4_2 = {};
		unsigned long orefdst;

		fl4_2.daddr = fl4_dec.saddr;
		rt2 = ip_route_output_key(net, &fl4_2);
		if (IS_ERR(rt2)) {
			err = PTR_ERR(rt2);
			goto relookup_failed;
		}
		/* Ugh! */
		orefdst = skb_in->_skb_refdst; /* save old refdst */
		skb_dst_set(skb_in, NULL);
		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
				     RT_TOS(tos), rt2->dst.dev);

		dst_release(&rt2->dst);
		rt2 = skb_rtable(skb_in);
		skb_in->_skb_refdst = orefdst; /* restore old refdst */
	}

	if (err)
		goto relookup_failed;

	rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
					    flowi4_to_flowi(&fl4_dec), NULL,
					    XFRM_LOOKUP_ICMP);
	if (!IS_ERR(rt2)) {
		dst_release(&rt->dst);
		memcpy(fl4, &fl4_dec, sizeof(*fl4));
		rt = rt2;
	} else if (PTR_ERR(rt2) == -EPERM) {
		if (rt)
			dst_release(&rt->dst);
		return rt2;
	} else {
		err = PTR_ERR(rt2);
		goto relookup_failed;
	}
	return rt;

relookup_failed:
	if (rt)
		return rt;
	return ERR_PTR(err);
}

/*
 *	Send an ICMP message in response to a situation
 *
 *	RFC 1122: 3.2.2	MUST send at least the IP header and 8 bytes of header.
 *		  MAY send more (we do).
 *		  MUST NOT change this header information.
 *		  MUST NOT reply to a multicast/broadcast IP address.
 *		  MUST NOT reply to a multicast/broadcast MAC address.
 *		  MUST reply to only the first fragment.
 */

void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
		 const struct ip_options *opt)
{
	struct iphdr *iph;
	int room;
	struct icmp_bxm icmp_param;
	struct rtable *rt = skb_rtable(skb_in);
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	__be32 saddr;
	u8 tos;
	u32 mark;
	struct net *net;
	struct sock *sk;

	if (!rt)
		goto out;

	if (rt->dst.dev)
		net = dev_net(rt->dst.dev);
	else if (skb_in->dev)
		net = dev_net(skb_in->dev);
	else
		goto out;
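	/*
	 * Sanity checks before we build anything: the offending packet
	 * must carry a sane IP header, must not have been sent to a
	 * link-level or IP broadcast/multicast address, must be the
	 * first fragment, and must not itself be an ICMP error.
	 */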
	/*
	 *	Find the original header. It is expected to be valid, of course.
	 *	Check this, icmp_send is called from the most obscure devices
	 *	sometimes.
	 */
	iph = ip_hdr(skb_in);

	if ((u8 *)iph < skb_in->head ||
	    (skb_network_header(skb_in) + sizeof(*iph)) >
	    skb_tail_pointer(skb_in))
		goto out;

	/*
	 *	No replies to physical multicast/broadcast
	 */
	if (skb_in->pkt_type != PACKET_HOST)
		goto out;

	/*
	 *	Now check at the protocol level
	 */
	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto out;

	/*
	 *	Only reply to fragment 0. We byte re-order the constant
	 *	mask for efficiency.
	 */
	if (iph->frag_off & htons(IP_OFFSET))
		goto out;

	/*
	 *	If we send an ICMP error to an ICMP error a mess would result..
	 */
	if (icmp_pointers[type].error) {
		/*
		 *	We are an error, check if we are replying to an
		 *	ICMP error
		 */
		if (iph->protocol == IPPROTO_ICMP) {
			u8 _inner_type, *itp;

			itp = skb_header_pointer(skb_in,
						 skb_network_header(skb_in) +
						 (iph->ihl << 2) +
						 offsetof(struct icmphdr,
							  type) -
						 skb_in->data,
						 sizeof(_inner_type),
						 &_inner_type);
			if (!itp)
				goto out;

			/*
			 *	Assume any unknown ICMP type is an error. This
			 *	isn't specified by the RFC, but think about it..
			 */
			if (*itp > NR_ICMP_TYPES ||
			    icmp_pointers[*itp].error)
				goto out;
		}
	}

	/* Needed by both icmp_global_allow and icmp_xmit_lock */
	local_bh_disable();

	/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless the
	 * incoming dev is loopback.  If the outgoing dev changes to not be
	 * loopback, the peer ratelimit still works (in icmpv4_xrlim_allow).
	 */
	if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
	    !icmpv4_global_allow(net, type, code))
		goto out_bh_enable;

	sk = icmp_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;

	/*
	 *	Construct source address and options.
	 */

	saddr = iph->daddr;
	if (!(rt->rt_flags & RTCF_LOCAL)) {
		struct net_device *dev = NULL;

		rcu_read_lock();
		if (rt_is_input_route(rt) &&
		    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
			dev = dev_get_by_index_rcu(net, inet_iif(skb_in));

		if (dev)
			saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
		else
			saddr = 0;
		rcu_read_unlock();
	}

	tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
					   IPTOS_PREC_INTERNETCONTROL) :
					   iph->tos;
	mark = IP4_REPLY_MARK(net, skb_in->mark);

	if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
		goto out_unlock;


	/*
	 *	Prepare data for ICMP header.
	 */

	icmp_param.data.icmph.type = type;
	icmp_param.data.icmph.code = code;
	icmp_param.data.icmph.un.gateway = info;
	icmp_param.data.icmph.checksum = 0;
	icmp_param.skb = skb_in;
	icmp_param.offset = skb_network_offset(skb_in);
	inet_sk(sk)->tos = tos;
	sk->sk_mark = mark;
	ipcm_init(&ipc);
	ipc.addr = iph->saddr;
	ipc.opt = &icmp_param.replyopts.opt;

	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
			       type, code, &icmp_param);
	if (IS_ERR(rt))
		goto out_unlock;

	/* peer icmp_ratelimit */
	if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
		goto ende;

	/* RFC says return as much as we can without exceeding 576 bytes. */
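	/* That is: room = min(path MTU, 576) - our own IP header (including
	 * the echoed options) - the ICMP header; data_len is then clamped
	 * to whatever is left of the offending packet.
	 */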
	room = dst_mtu(&rt->dst);
	if (room > 576)
		room = 576;
	room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
	room -= sizeof(struct icmphdr);

	icmp_param.data_len = skb_in->len - icmp_param.offset;
	if (icmp_param.data_len > room)
		icmp_param.data_len = room;
	icmp_param.head_len = sizeof(struct icmphdr);

	icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
ende:
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
out:;
}
EXPORT_SYMBOL(__icmp_send);


static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	const struct net_protocol *ipprot;
	int protocol = iph->protocol;

	/* Check that the full IP header plus 8 bytes of protocol data are
	 * present, to avoid additional coding at protocol handlers.
	 */
	if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
		return;
	}

	raw_icmp_error(skb, protocol, info);

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, info);
}
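/*
 * icmp_tag_validation() reports whether the quoted protocol set
 * icmp_strict_tag_validation in its net_protocol; ip_no_pmtu_disc mode 3
 * (see icmp_unreach() below) only accepts ICMP_FRAG_NEEDED for such
 * protocols.
 */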
static bool icmp_tag_validation(int proto)
{
	bool ok;

	rcu_read_lock();
	ok = rcu_dereference(inet_protos[proto])->icmp_strict_tag_validation;
	rcu_read_unlock();
	return ok;
}

/*
 *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and
 *	ICMP_PARAMETERPROB.
 */

static bool icmp_unreach(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct icmphdr *icmph;
	struct net *net;
	u32 info = 0;

	net = dev_net(skb_dst(skb)->dev);

	/*
	 *	Incomplete header ?
	 *	Only checks for the IP header, there should be an
	 *	additional check for longer headers in upper levels.
	 */

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out_err;

	icmph = icmp_hdr(skb);
	iph   = (const struct iphdr *)skb->data;

	if (iph->ihl < 5) /* Mangled header, drop. */
		goto out_err;

	switch (icmph->type) {
	case ICMP_DEST_UNREACH:
		switch (icmph->code & 15) {
		case ICMP_NET_UNREACH:
		case ICMP_HOST_UNREACH:
		case ICMP_PROT_UNREACH:
		case ICMP_PORT_UNREACH:
			break;
		case ICMP_FRAG_NEEDED:
			/* for documentation of the ip_no_pmtu_disc
			 * values please see
			 * Documentation/networking/ip-sysctl.txt
			 */
			switch (net->ipv4.sysctl_ip_no_pmtu_disc) {
			default:
				net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n",
						    &iph->daddr);
				break;
			case 2:
				goto out;
			case 3:
				if (!icmp_tag_validation(iph->protocol))
					goto out;
				/* fall through */
			case 0:
				info = ntohs(icmph->un.frag.mtu);
			}
			break;
		case ICMP_SR_FAILED:
			net_dbg_ratelimited("%pI4: Source Route Failed\n",
					    &iph->daddr);
			break;
		default:
			break;
		}
		if (icmph->code > NR_ICMP_UNREACH)
			goto out;
		break;
	case ICMP_PARAMETERPROB:
		info = ntohl(icmph->un.gateway) >> 24;
		break;
	case ICMP_TIME_EXCEEDED:
		__ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS);
		if (icmph->code == ICMP_EXC_FRAGTIME)
			goto out;
		break;
	}

	/*
	 *	Throw it at our lower layers
	 *
	 *	RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
	 *		  header.
	 *	RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
	 *		  transport layer.
	 *	RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
	 *		  transport layer.
	 */

	/*
	 *	Check the other end isn't violating RFC 1122. Some routers send
	 *	bogus responses to broadcast frames. If you see this message
	 *	first check your netmask matches at both ends, if it does then
	 *	get the other vendor to fix their kit.
	 */

	if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
	    inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
		net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
				     &ip_hdr(skb)->saddr,
				     icmph->type, icmph->code,
				     &iph->daddr, skb->dev->name);
		goto out;
	}

	icmp_socket_deliver(skb, info);

out:
	return true;
out_err:
	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
	return false;
}


/*
 *	Handle ICMP_REDIRECT.
 */

static bool icmp_redirect(struct sk_buff *skb)
{
	if (skb->len < sizeof(struct iphdr)) {
		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
		return false;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
		/* there ought to be a stat */
		return false;
	}

	icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
	return true;
}

/*
 *	Handle ICMP_ECHO ("ping") requests.
 *
 *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
 *		  requests.
 *	RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
 *		  included in the reply.
 *	RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
 *		  echo requests, MUST have default=NOT.
 *	See also WRT handling of options once they are done and working.
 */

static bool icmp_echo(struct sk_buff *skb)
{
	struct net *net;

	net = dev_net(skb_dst(skb)->dev);
	if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
		struct icmp_bxm icmp_param;

		icmp_param.data.icmph	   = *icmp_hdr(skb);
		icmp_param.data.icmph.type = ICMP_ECHOREPLY;
		icmp_param.skb		   = skb;
		icmp_param.offset	   = 0;
		icmp_param.data_len	   = skb->len;
		icmp_param.head_len	   = sizeof(struct icmphdr);
		icmp_reply(&icmp_param, skb);
	}
	/* should there be an ICMP stat for ignored echos? */
	return true;
}

/*
 *	Handle ICMP Timestamp requests.
 *	RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
 *		  SHOULD be in the kernel for minimum random latency.
 *		  MUST be accurate to a few minutes.
 *		  MUST be updated at least at 15Hz.
 */
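/*
 * The reply carries three 32-bit timestamps (ms since midnight UT):
 * times[0] is the originate stamp copied from the request below, while
 * times[1] and times[2] are our receive and transmit stamps, both set
 * to inet_current_timestamp().
 */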
static bool icmp_timestamp(struct sk_buff *skb)
{
	struct icmp_bxm icmp_param;
	/*
	 *	Too short.
	 */
	if (skb->len < 4)
		goto out_err;

	/*
	 *	Fill in the current time as ms since midnight UT:
	 */
	icmp_param.data.times[1] = inet_current_timestamp();
	icmp_param.data.times[2] = icmp_param.data.times[1];

	BUG_ON(skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4));

	icmp_param.data.icmph	   = *icmp_hdr(skb);
	icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
	icmp_param.data.icmph.code = 0;
	icmp_param.skb		   = skb;
	icmp_param.offset	   = 0;
	icmp_param.data_len	   = 0;
	icmp_param.head_len	   = sizeof(struct icmphdr) + 12;
	icmp_reply(&icmp_param, skb);
	return true;

out_err:
	__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
	return false;
}

static bool icmp_discard(struct sk_buff *skb)
{
	/* pretend it was a success */
	return true;
}

/*
 *	Deal with incoming ICMP packets.
 */
int icmp_rcv(struct sk_buff *skb)
{
	struct icmphdr *icmph;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = dev_net(rt->dst.dev);
	bool success;
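	/*
	 * If the normal inbound policy check fails, the packet may still
	 * be acceptable when the last transform state allows ICMP
	 * (XFRM_STATE_ICMP): temporarily point the network header at the
	 * quoted inner IP header and redo the policy check in reverse.
	 */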
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		struct sec_path *sp = skb_sec_path(skb);
		int nh;

		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
		      XFRM_STATE_ICMP))
			goto drop;

		if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
			goto drop;

		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*icmph));

		if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		skb_set_network_header(skb, nh);
	}

	__ICMP_INC_STATS(net, ICMP_MIB_INMSGS);

	if (skb_checksum_simple_validate(skb))
		goto csum_error;

	if (!pskb_pull(skb, sizeof(*icmph)))
		goto error;

	icmph = icmp_hdr(skb);

	ICMPMSGIN_INC_STATS(net, icmph->type);
	/*
	 *	18 is the highest 'known' ICMP type. Anything else is a mystery
	 *
	 *	RFC 1122: 3.2.2 Unknown ICMP message types MUST be silently
	 *		  discarded.
	 */
	if (icmph->type > NR_ICMP_TYPES)
		goto error;


	/*
	 *	Parse the ICMP message
	 */

	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		/*
		 *	RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
		 *	  silently ignored (we let user decide with a sysctl).
		 *	RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
		 *	  discarded if to broadcast/multicast.
		 */
		if ((icmph->type == ICMP_ECHO ||
		     icmph->type == ICMP_TIMESTAMP) &&
		    net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
			goto error;
		}
		if (icmph->type != ICMP_ECHO &&
		    icmph->type != ICMP_TIMESTAMP &&
		    icmph->type != ICMP_ADDRESS &&
		    icmph->type != ICMP_ADDRESSREPLY) {
			goto error;
		}
	}

	success = icmp_pointers[icmph->type].handler(skb);

	if (success) {
		consume_skb(skb);
		return NET_RX_SUCCESS;
	}

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
csum_error:
	__ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS);
error:
	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
	goto drop;
}

int icmp_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	int offset = iph->ihl<<2;
	struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
	int type = icmp_hdr(skb)->type;
	int code = icmp_hdr(skb)->code;
	struct net *net = dev_net(skb->dev);

	/*
	 * Use ping_err to handle all icmp errors except those
	 * triggered by ICMP_ECHOREPLY, which is sent from the kernel.
	 */
	if (icmph->type != ICMP_ECHOREPLY) {
		ping_err(skb, offset, info);
		return 0;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP);
	else if (type == ICMP_REDIRECT)
		ipv4_redirect(skb, net, 0, IPPROTO_ICMP);

	return 0;
}

/*
 *	This table is the definition of how we handle ICMP.
 */
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
	[ICMP_ECHOREPLY] = {
		.handler = ping_rcv,
	},
	[1] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[2] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_DEST_UNREACH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_SOURCE_QUENCH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_REDIRECT] = {
		.handler = icmp_redirect,
		.error = 1,
	},
	[6] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[7] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_ECHO] = {
		.handler = icmp_echo,
	},
	[9] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[10] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_TIME_EXCEEDED] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_PARAMETERPROB] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_TIMESTAMP] = {
		.handler = icmp_timestamp,
	},
	[ICMP_TIMESTAMPREPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REQUEST] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_ADDRESS] = {
		.handler = icmp_discard,
	},
	[ICMP_ADDRESSREPLY] = {
		.handler = icmp_discard,
	},
};

static void __net_exit icmp_sk_exit(struct net *net)
{
	int i;

	for_each_possible_cpu(i)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
	free_percpu(net->ipv4.icmp_sk);
	net->ipv4.icmp_sk = NULL;
}
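/*
 * Per-netns setup: create one kernel ICMP control socket per possible
 * CPU (used only on the transmit path, see icmp_sk() above) and set the
 * default values of the ICMP sysctls.
 */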
static int __net_init icmp_sk_init(struct net *net)
{
	int i, err;

	net->ipv4.icmp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.icmp_sk)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct sock *sk;

		err = inet_ctl_sock_create(&sk, PF_INET,
					   SOCK_RAW, IPPROTO_ICMP, net);
		if (err < 0)
			goto fail;

		*per_cpu_ptr(net->ipv4.icmp_sk, i) = sk;

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff/skb_shared_info struct overhead.
		 */
		sk->sk_sndbuf =	2 * SKB_TRUESIZE(64 * 1024);

		/*
		 * Speedup sock_wfree()
		 */
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
	}

	/* Control parameters for ECHO replies. */
	net->ipv4.sysctl_icmp_echo_ignore_all = 0;
	net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;

	/* Control parameter - ignore bogus broadcast responses? */
	net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;

	/*
	 *	Configurable global rate limit.
	 *
	 *	ratelimit defines tokens/packet consumed for dst->rate_token
	 *	bucket; ratemask defines which icmp types are ratelimited by
	 *	setting its bit position.
	 *
	 *	default:
	 *	dest unreachable (3), source quench (4),
	 *	time exceeded (11), parameter problem (12)
	 */

	net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
	net->ipv4.sysctl_icmp_ratemask = 0x1818;
	net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;

	return 0;

fail:
	icmp_sk_exit(net);
	return err;
}

static struct pernet_operations __net_initdata icmp_sk_ops = {
	.init = icmp_sk_init,
	.exit = icmp_sk_exit,
};

int __init icmp_init(void)
{
	return register_pernet_subsys(&icmp_sk_ops);
}