// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP to API glue.
 *
 * Authors:	see ip.c
 *
 * Fixes:
 *		Many		:	Split from ip.c , see ip.c for history.
 *		Martin Mares	:	TOS setting fixed.
 *		Alan Cox	:	Fixed a couple of oopses in Martin's
 *					TOS tweaks.
 *		Mike McLagan	:	Routing by source
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/tcp_states.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
#include <net/checksum.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/ip_fib.h>

#include <linux/errqueue.h>
#include <linux/uaccess.h>

#include <linux/bpfilter.h>

/*
 *	SOL_IP control messages.
 */

static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct in_pktinfo info = *PKTINFO_SKB_CB(skb);

	info.ipi_addr.s_addr = ip_hdr(skb)->daddr;

	put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}

static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
{
	int ttl = ip_hdr(skb)->ttl;
	put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
}

static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
{
	put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
}

static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
	if (IPCB(skb)->opt.optlen == 0)
		return;

	put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
		 ip_hdr(skb) + 1);
}


static void ip_cmsg_recv_retopts(struct net *net, struct msghdr *msg,
				 struct sk_buff *skb)
{
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;

	if (IPCB(skb)->opt.optlen == 0)
		return;

	if (ip_options_echo(net, opt, skb)) {
		msg->msg_flags |= MSG_CTRUNC;
		return;
	}
	ip_options_undo(opt);

	put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}

static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
{
	int val;

	if (IPCB(skb)->frag_max_size == 0)
		return;

	val = IPCB(skb)->frag_max_size;
	put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val);
}

static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
				  int tlen, int offset)
{
	__wsum csum = skb->csum;

	if (skb->ip_summed != CHECKSUM_COMPLETE)
		return;

	if (offset != 0) {
		int tend_off = skb_transport_offset(skb) + tlen;
		csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
	}

	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}

static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
	char *secdata;
	u32 seclen, secid;
	int err;

	err = security_socket_getpeersec_dgram(NULL, skb, &secid);
	if (err)
		return;

	err = security_secid_to_secctx(secid, &secdata, &seclen);
	if (err)
		return;

	put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
	security_release_secctx(secdata, seclen);
}

static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
	__be16 _ports[2], *ports;
	struct sockaddr_in sin;

	/* All current transport protocols have the port numbers in the
	 * first four bytes of the transport header and this function is
	 * written with this assumption in mind.
	 */
	ports = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_ports), &_ports);
	if (!ports)
		return;

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
	sin.sin_port = ports[1];
	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

	put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}

void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset)
{
	struct inet_sock *inet = inet_sk(sk);
	unsigned int flags = inet->cmsg_flags;

	/* Ordered by supposed usage frequency */
	if (flags & IP_CMSG_PKTINFO) {
		ip_cmsg_recv_pktinfo(msg, skb);

		flags &= ~IP_CMSG_PKTINFO;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_TTL) {
		ip_cmsg_recv_ttl(msg, skb);

		flags &= ~IP_CMSG_TTL;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_TOS) {
		ip_cmsg_recv_tos(msg, skb);

		flags &= ~IP_CMSG_TOS;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_RECVOPTS) {
		ip_cmsg_recv_opts(msg, skb);

		flags &= ~IP_CMSG_RECVOPTS;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_RETOPTS) {
		ip_cmsg_recv_retopts(sock_net(sk), msg, skb);

		flags &= ~IP_CMSG_RETOPTS;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_PASSSEC) {
		ip_cmsg_recv_security(msg, skb);

		flags &= ~IP_CMSG_PASSSEC;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_ORIGDSTADDR) {
		ip_cmsg_recv_dstaddr(msg, skb);

		flags &= ~IP_CMSG_ORIGDSTADDR;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_CHECKSUM)
		ip_cmsg_recv_checksum(msg, skb, tlen, offset);

	if (flags & IP_CMSG_RECVFRAGSIZE)
		ip_cmsg_recv_fragsize(msg, skb);
}
EXPORT_SYMBOL(ip_cmsg_recv_offset);

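/*
 * Example (not part of the kernel sources): a minimal userspace sketch of
 * the receive side that the cmsg helpers above serve.  It assumes a bound
 * UDP socket "fd", standard headers (<sys/socket.h>, <netinet/in.h> with
 * _GNU_SOURCE for struct in_pktinfo), and omits error handling.
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));
 *
 *	char data[1500], cbuf[256];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	recvmsg(fd, &msg, 0);
 *
 *	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
 *	     c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level != SOL_IP)
 *			continue;
 *		if (c->cmsg_type == IP_PKTINFO) {
 *			struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(c);
 *			// pi->ipi_ifindex and pi->ipi_addr are what
 *			// ip_cmsg_recv_pktinfo() above appended.
 *		} else if (c->cmsg_type == IP_TTL) {
 *			int ttl = *(int *)CMSG_DATA(c);
 *			// filled in by ip_cmsg_recv_ttl() above.
 *		}
 *	}
 */
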
int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
		 bool allow_ipv6)
{
	int err, val;
	struct cmsghdr *cmsg;
	struct net *net = sock_net(sk);

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
#if IS_ENABLED(CONFIG_IPV6)
		if (allow_ipv6 &&
		    cmsg->cmsg_level == SOL_IPV6 &&
		    cmsg->cmsg_type == IPV6_PKTINFO) {
			struct in6_pktinfo *src_info;

			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
				return -EINVAL;
			src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
				return -EINVAL;
			if (src_info->ipi6_ifindex)
				ipc->oif = src_info->ipi6_ifindex;
			ipc->addr = src_info->ipi6_addr.s6_addr32[3];
			continue;
		}
#endif
		if (cmsg->cmsg_level == SOL_SOCKET) {
			err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
			if (err)
				return err;
			continue;
		}

		if (cmsg->cmsg_level != SOL_IP)
			continue;
		switch (cmsg->cmsg_type) {
		case IP_RETOPTS:
			err = cmsg->cmsg_len - sizeof(struct cmsghdr);

			/* Our caller is responsible for freeing ipc->opt */
			err = ip_options_get(net, &ipc->opt,
					     KERNEL_SOCKPTR(CMSG_DATA(cmsg)),
					     err < 40 ? err : 40);
			if (err)
				return err;
			break;
		case IP_PKTINFO:
		{
			struct in_pktinfo *info;
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
				return -EINVAL;
			info = (struct in_pktinfo *)CMSG_DATA(cmsg);
			if (info->ipi_ifindex)
				ipc->oif = info->ipi_ifindex;
			ipc->addr = info->ipi_spec_dst.s_addr;
			break;
		}
		case IP_TTL:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
				return -EINVAL;
			val = *(int *)CMSG_DATA(cmsg);
			if (val < 1 || val > 255)
				return -EINVAL;
			ipc->ttl = val;
			break;
		case IP_TOS:
			if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
				val = *(int *)CMSG_DATA(cmsg);
			else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
				val = *(u8 *)CMSG_DATA(cmsg);
			else
				return -EINVAL;
			if (val < 0 || val > 255)
				return -EINVAL;
			ipc->tos = val;
			ipc->priority = rt_tos2priority(ipc->tos);
			break;
		case IP_PROTOCOL:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
				return -EINVAL;
			val = *(int *)CMSG_DATA(cmsg);
			if (val < 1 || val > 255)
				return -EINVAL;
			ipc->protocol = val;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

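/*
 * Example (not part of the kernel sources): a hedged userspace sketch of the
 * send side parsed by ip_cmsg_send() above.  Passing IP_PKTINFO as ancillary
 * data on sendmsg() picks the output interface and preferred source address
 * for a single datagram.  "fd", "dst" and "src_addr" are assumed variables
 * (a UDP socket, a filled-in sockaddr_in and a __be32 source address); error
 * handling is omitted.
 *
 *	char buf[] = "payload";
 *	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))] = { 0 };
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_name = &dst, .msg_namelen = sizeof(dst),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *	struct in_pktinfo *pi;
 *
 *	c->cmsg_level = SOL_IP;
 *	c->cmsg_type = IP_PKTINFO;
 *	c->cmsg_len = CMSG_LEN(sizeof(*pi));
 *	pi = (struct in_pktinfo *)CMSG_DATA(c);
 *	pi->ipi_ifindex = 2;			// example ifindex, assumption
 *	pi->ipi_spec_dst.s_addr = src_addr;	// becomes ipc->addr above
 *	sendmsg(fd, &msg, 0);
 */
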
static void ip_ra_destroy_rcu(struct rcu_head *head)
{
	struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);

	sock_put(ra->saved_sk);
	kfree(ra);
}

int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *))
{
	struct ip_ra_chain *ra, *new_ra;
	struct ip_ra_chain __rcu **rap;
	struct net *net = sock_net(sk);

	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
		return -EINVAL;

	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
	if (on && !new_ra)
		return -ENOMEM;

	mutex_lock(&net->ipv4.ra_mutex);
	for (rap = &net->ipv4.ra_chain;
	     (ra = rcu_dereference_protected(*rap,
			lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
	     rap = &ra->next) {
		if (ra->sk == sk) {
			if (on) {
				mutex_unlock(&net->ipv4.ra_mutex);
				kfree(new_ra);
				return -EADDRINUSE;
			}
			/* don't let ip_call_ra_chain() use sk again */
			ra->sk = NULL;
			RCU_INIT_POINTER(*rap, ra->next);
			mutex_unlock(&net->ipv4.ra_mutex);

			if (ra->destructor)
				ra->destructor(sk);
			/*
			 * Delay sock_put(sk) and kfree(ra) by one rcu grace
			 * period. This guarantees ip_call_ra_chain() doesn't
			 * need to mess with socket refcounts.
			 */
			ra->saved_sk = sk;
			call_rcu(&ra->rcu, ip_ra_destroy_rcu);
			return 0;
		}
	}
	if (!new_ra) {
		mutex_unlock(&net->ipv4.ra_mutex);
		return -ENOBUFS;
	}
	new_ra->sk = sk;
	new_ra->destructor = destructor;

	RCU_INIT_POINTER(new_ra->next, ra);
	rcu_assign_pointer(*rap, new_ra);
	sock_hold(sk);
	mutex_unlock(&net->ipv4.ra_mutex);

	return 0;
}

static void ipv4_icmp_error_rfc4884(const struct sk_buff *skb,
				    struct sock_ee_data_rfc4884 *out)
{
	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
	case ICMP_TIME_EXCEEDED:
	case ICMP_PARAMETERPROB:
		ip_icmp_error_rfc4884(skb, out, sizeof(struct icmphdr),
				      icmp_hdr(skb)->un.reserved[1] * 4);
	}
}

void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
		   __be16 port, u32 info, u8 *payload)
{
	struct sock_exterr_skb *serr;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
	serr->ee.ee_type = icmp_hdr(skb)->type;
	serr->ee.ee_code = icmp_hdr(skb)->code;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
				   skb_network_header(skb);
	serr->port = port;

	if (skb_pull(skb, payload - skb->data)) {
		if (inet_sk(sk)->recverr_rfc4884)
			ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);

		skb_reset_transport_header(skb);
		if (sock_queue_err_skb(sk, skb) == 0)
			return;
	}
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_icmp_error);

void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sock_exterr_skb *serr;
	struct iphdr *iph;
	struct sk_buff *skb;

	if (!inet->recverr)
		return;

	skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->daddr = daddr;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = 0;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = port;

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}

/* For some errors we have valid addr_offset even with zero payload and
 * zero port. Also, addr_offset should be supported if port is set.
 */
static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
{
	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
}

/* IPv4 supports cmsg on all icmp errors and some timestamps
 *
 * Timestamp code paths do not initialize the fields expected by cmsg:
 * the PKTINFO fields in skb->cb[]. Fill those in here.
 */
static bool ipv4_datagram_support_cmsg(const struct sock *sk,
				       struct sk_buff *skb,
				       int ee_origin)
{
	struct in_pktinfo *info;

	if (ee_origin == SO_EE_ORIGIN_ICMP)
		return true;

	if (ee_origin == SO_EE_ORIGIN_LOCAL)
		return false;

	/* Support IP_PKTINFO on tstamp packets if requested, to correlate
	 * timestamp with egress dev. Not possible for packets without iif
	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
	 */
	info = PKTINFO_SKB_CB(skb);
	if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
	    !info->ipi_ifindex)
		return false;

	info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
	return true;
}

/*
 *	Handle MSG_ERRQUEUE
 */
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct {
		struct sock_extended_err ee;
		struct sockaddr_in	 offender;
	} errhdr;
	int err;
	int copied;

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}
	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);

	if (sin && ipv4_datagram_support_addr(serr)) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
						   serr->addr_offset);
		sin->sin_port = serr->port;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}

	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
	sin = &errhdr.offender;
	memset(sin, 0, sizeof(*sin));

	if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		if (inet_sk(sk)->cmsg_flags)
			ip_cmsg_recv(msg, skb);
	}

	put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);

	/* Now we could try to dump offended packet options */

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	consume_skb(skb);
out:
	return err;
}

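/*
 * Example (not part of the kernel sources): a hedged userspace sketch of
 * draining the error queue that ip_recv_error() serves.  After enabling
 * IP_RECVERR, an ICMP error queued by ip_icmp_error() above is read with
 * MSG_ERRQUEUE and arrives as an IP_RECVERR cmsg.  "fd" is an assumed
 * connected UDP socket; <linux/errqueue.h> provides struct sock_extended_err
 * and SO_EE_OFFENDER().  Error handling is omitted.
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *
 *	char cbuf[512];
 *	struct msghdr msg = {
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *		return;		// nothing queued yet (EAGAIN)
 *
 *	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
 *	     c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level == SOL_IP && c->cmsg_type == IP_RECVERR) {
 *			struct sock_extended_err *ee =
 *				(struct sock_extended_err *)CMSG_DATA(c);
 *			// ee->ee_errno, ee->ee_type and ee->ee_code are the
 *			// fields filled by ip_icmp_error(); SO_EE_OFFENDER(ee)
 *			// is the "offender" sockaddr written by ip_recv_error().
 *		}
 *	}
 */
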
void __ip_sock_set_tos(struct sock *sk, int val)
{
	if (sk->sk_type == SOCK_STREAM) {
		val &= ~INET_ECN_MASK;
		val |= inet_sk(sk)->tos & INET_ECN_MASK;
	}
	if (inet_sk(sk)->tos != val) {
		inet_sk(sk)->tos = val;
		WRITE_ONCE(sk->sk_priority, rt_tos2priority(val));
		sk_dst_reset(sk);
	}
}

void ip_sock_set_tos(struct sock *sk, int val)
{
	lock_sock(sk);
	__ip_sock_set_tos(sk, val);
	release_sock(sk);
}
EXPORT_SYMBOL(ip_sock_set_tos);

void ip_sock_set_freebind(struct sock *sk)
{
	lock_sock(sk);
	inet_sk(sk)->freebind = true;
	release_sock(sk);
}
EXPORT_SYMBOL(ip_sock_set_freebind);

void ip_sock_set_recverr(struct sock *sk)
{
	lock_sock(sk);
	inet_sk(sk)->recverr = true;
	release_sock(sk);
}
EXPORT_SYMBOL(ip_sock_set_recverr);

int ip_sock_set_mtu_discover(struct sock *sk, int val)
{
	if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
		return -EINVAL;
	lock_sock(sk);
	inet_sk(sk)->pmtudisc = val;
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(ip_sock_set_mtu_discover);

void ip_sock_set_pktinfo(struct sock *sk)
{
	lock_sock(sk);
	inet_sk(sk)->cmsg_flags |= IP_CMSG_PKTINFO;
	release_sock(sk);
}
EXPORT_SYMBOL(ip_sock_set_pktinfo);

/*
 *	Socket option code for IP. This is the end of the line after any
 *	TCP,UDP etc options on an IP socket.
 */
static bool setsockopt_needs_rtnl(int optname)
{
	switch (optname) {
	case IP_ADD_MEMBERSHIP:
	case IP_ADD_SOURCE_MEMBERSHIP:
	case IP_BLOCK_SOURCE:
	case IP_DROP_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
	case IP_MSFILTER:
	case IP_UNBLOCK_SOURCE:
	case MCAST_BLOCK_SOURCE:
	case MCAST_MSFILTER:
	case MCAST_JOIN_GROUP:
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_UNBLOCK_SOURCE:
		return true;
	}
	return false;
}

static int set_mcast_msfilter(struct sock *sk, int ifindex,
			      int numsrc, int fmode,
			      struct sockaddr_storage *group,
			      struct sockaddr_storage *list)
{
	struct ip_msfilter *msf;
	struct sockaddr_in *psin;
	int err, i;

	msf = kmalloc(IP_MSFILTER_SIZE(numsrc), GFP_KERNEL);
	if (!msf)
		return -ENOBUFS;

	psin = (struct sockaddr_in *)group;
	if (psin->sin_family != AF_INET)
		goto Eaddrnotavail;
	msf->imsf_multiaddr = psin->sin_addr.s_addr;
	msf->imsf_interface = 0;
	msf->imsf_fmode = fmode;
	msf->imsf_numsrc = numsrc;
	for (i = 0; i < numsrc; ++i) {
		psin = (struct sockaddr_in *)&list[i];

		if (psin->sin_family != AF_INET)
			goto Eaddrnotavail;
		msf->imsf_slist_flex[i] = psin->sin_addr.s_addr;
	}
	err = ip_mc_msfilter(sk, msf, ifindex);
	kfree(msf);
	return err;

Eaddrnotavail:
	kfree(msf);
	return -EADDRNOTAVAIL;
}

static int copy_group_source_from_sockptr(struct group_source_req *greqs,
					  sockptr_t optval, int optlen)
{
	if (in_compat_syscall()) {
		struct compat_group_source_req gr32;

		if (optlen != sizeof(gr32))
			return -EINVAL;
		if (copy_from_sockptr(&gr32, optval, sizeof(gr32)))
			return -EFAULT;
		greqs->gsr_interface = gr32.gsr_interface;
		greqs->gsr_group = gr32.gsr_group;
		greqs->gsr_source = gr32.gsr_source;
	} else {
		if (optlen != sizeof(*greqs))
			return -EINVAL;
		if (copy_from_sockptr(greqs, optval, sizeof(*greqs)))
			return -EFAULT;
	}

	return 0;
}

static int do_mcast_group_source(struct sock *sk, int optname,
				 sockptr_t optval, int optlen)
{
	struct group_source_req greqs;
	struct ip_mreq_source mreqs;
	struct sockaddr_in *psin;
	int omode, add, err;

	err = copy_group_source_from_sockptr(&greqs, optval, optlen);
	if (err)
		return err;

	if (greqs.gsr_group.ss_family != AF_INET ||
	    greqs.gsr_source.ss_family != AF_INET)
		return -EADDRNOTAVAIL;

	psin = (struct sockaddr_in *)&greqs.gsr_group;
	mreqs.imr_multiaddr = psin->sin_addr.s_addr;
	psin = (struct sockaddr_in *)&greqs.gsr_source;
	mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
	mreqs.imr_interface = 0; /* use index for mc_source */

	if (optname == MCAST_BLOCK_SOURCE) {
		omode = MCAST_EXCLUDE;
		add = 1;
	} else if (optname == MCAST_UNBLOCK_SOURCE) {
		omode = MCAST_EXCLUDE;
		add = 0;
	} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
		struct ip_mreqn mreq;

		psin = (struct sockaddr_in *)&greqs.gsr_group;
		mreq.imr_multiaddr = psin->sin_addr;
		mreq.imr_address.s_addr = 0;
		mreq.imr_ifindex = greqs.gsr_interface;
		err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
		if (err && err != -EADDRINUSE)
			return err;
		greqs.gsr_interface = mreq.imr_ifindex;
		omode = MCAST_INCLUDE;
		add = 1;
	} else /* MCAST_LEAVE_SOURCE_GROUP */ {
		omode = MCAST_INCLUDE;
		add = 0;
	}
	return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface);
}

static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
{
	struct group_filter *gsf = NULL;
	int err;

	if (optlen < GROUP_FILTER_SIZE(0))
		return -EINVAL;
	if (optlen > READ_ONCE(sysctl_optmem_max))
		return -ENOBUFS;

	gsf = memdup_sockptr(optval, optlen);
	if (IS_ERR(gsf))
		return PTR_ERR(gsf);

	/* numsrc >= (4G-140)/128 overflow in 32 bits */
	err = -ENOBUFS;
	if (gsf->gf_numsrc >= 0x1ffffff ||
	    gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
		goto out_free_gsf;

	err = -EINVAL;
	if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen)
		goto out_free_gsf;

	err = set_mcast_msfilter(sk, gsf->gf_interface, gsf->gf_numsrc,
				 gsf->gf_fmode, &gsf->gf_group,
				 gsf->gf_slist_flex);
out_free_gsf:
	kfree(gsf);
	return err;
}

static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
					int optlen)
{
	const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
	struct compat_group_filter *gf32;
	unsigned int n;
	void *p;
	int err;

	if (optlen < size0)
		return -EINVAL;
	if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
		return -ENOBUFS;

	p = kmalloc(optlen + 4, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	gf32 = p + 4; /* we want ->gf_group and ->gf_slist_flex aligned */

	err = -EFAULT;
	if (copy_from_sockptr(gf32, optval, optlen))
		goto out_free_gsf;

	/* numsrc >= (4G-140)/128 overflow in 32 bits */
	n = gf32->gf_numsrc;
	err = -ENOBUFS;
	if (n >= 0x1ffffff)
		goto out_free_gsf;

	err = -EINVAL;
	if (offsetof(struct compat_group_filter, gf_slist_flex[n]) > optlen)
		goto out_free_gsf;

	/* numsrc >= (4G-140)/128 overflow in 32 bits */
	err = -ENOBUFS;
	if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
		goto out_free_gsf;
	err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
				 &gf32->gf_group, gf32->gf_slist_flex);
out_free_gsf:
	kfree(p);
	return err;
}

static int ip_mcast_join_leave(struct sock *sk, int optname,
			       sockptr_t optval, int optlen)
{
	struct ip_mreqn mreq = { };
	struct sockaddr_in *psin;
	struct group_req greq;

	if (optlen < sizeof(struct group_req))
		return -EINVAL;
	if (copy_from_sockptr(&greq, optval, sizeof(greq)))
		return -EFAULT;

	psin = (struct sockaddr_in *)&greq.gr_group;
	if (psin->sin_family != AF_INET)
		return -EINVAL;
	mreq.imr_multiaddr = psin->sin_addr;
	mreq.imr_ifindex = greq.gr_interface;
	if (optname == MCAST_JOIN_GROUP)
		return ip_mc_join_group(sk, &mreq);
	return ip_mc_leave_group(sk, &mreq);
}

static int compat_ip_mcast_join_leave(struct sock *sk, int optname,
				      sockptr_t optval, int optlen)
{
	struct compat_group_req greq;
	struct ip_mreqn mreq = { };
	struct sockaddr_in *psin;

	if (optlen < sizeof(struct compat_group_req))
		return -EINVAL;
	if (copy_from_sockptr(&greq, optval, sizeof(greq)))
		return -EFAULT;

	psin = (struct sockaddr_in *)&greq.gr_group;
	if (psin->sin_family != AF_INET)
		return -EINVAL;
	mreq.imr_multiaddr = psin->sin_addr;
	mreq.imr_ifindex = greq.gr_interface;

	if (optname == MCAST_JOIN_GROUP)
		return ip_mc_join_group(sk, &mreq);
	return ip_mc_leave_group(sk, &mreq);
}

DEFINE_STATIC_KEY_FALSE(ip4_min_ttl);

int do_ip_setsockopt(struct sock *sk, int level, int optname,
		     sockptr_t optval, unsigned int optlen)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int val = 0, err;
	bool needs_rtnl = setsockopt_needs_rtnl(optname);

	switch (optname) {
	case IP_PKTINFO:
	case IP_RECVTTL:
	case IP_RECVOPTS:
	case IP_RECVTOS:
	case IP_RETOPTS:
	case IP_TOS:
	case IP_TTL:
	case IP_HDRINCL:
	case IP_MTU_DISCOVER:
	case IP_RECVERR:
	case IP_ROUTER_ALERT:
	case IP_FREEBIND:
	case IP_PASSSEC:
	case IP_TRANSPARENT:
	case IP_MINTTL:
	case IP_NODEFRAG:
	case IP_BIND_ADDRESS_NO_PORT:
	case IP_UNICAST_IF:
	case IP_MULTICAST_TTL:
	case IP_MULTICAST_ALL:
	case IP_MULTICAST_LOOP:
	case IP_RECVORIGDSTADDR:
	case IP_CHECKSUM:
	case IP_RECVFRAGSIZE:
	case IP_RECVERR_RFC4884:
	case IP_LOCAL_PORT_RANGE:
		if (optlen >= sizeof(int)) {
			if (copy_from_sockptr(&val, optval, sizeof(val)))
				return -EFAULT;
		} else if (optlen >= sizeof(char)) {
			unsigned char ucval;

			if (copy_from_sockptr(&ucval, optval, sizeof(ucval)))
				return -EFAULT;
			val = (int) ucval;
		}
	}

	/* If optlen==0, it is equivalent to val == 0 */

	if (optname == IP_ROUTER_ALERT)
		return ip_ra_control(sk, val ? 1 : 0, NULL);
	if (ip_mroute_opt(optname))
		return ip_mroute_setsockopt(sk, optname, optval, optlen);

	err = 0;
	if (needs_rtnl)
		rtnl_lock();
	sockopt_lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		struct ip_options_rcu *old, *opt = NULL;

		if (optlen > 40)
			goto e_inval;
		err = ip_options_get(sock_net(sk), &opt, optval, optlen);
		if (err)
			break;
		old = rcu_dereference_protected(inet->inet_opt,
						lockdep_sock_is_held(sk));
		if (inet->is_icsk) {
			struct inet_connection_sock *icsk = inet_csk(sk);
#if IS_ENABLED(CONFIG_IPV6)
			if (sk->sk_family == PF_INET ||
			    (!((1 << sk->sk_state) &
			       (TCPF_LISTEN | TCPF_CLOSE)) &&
			     inet->inet_daddr != LOOPBACK4_IPV6)) {
#endif
				if (old)
					icsk->icsk_ext_hdr_len -= old->opt.optlen;
				if (opt)
					icsk->icsk_ext_hdr_len += opt->opt.optlen;
				icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if IS_ENABLED(CONFIG_IPV6)
			}
#endif
		}
		rcu_assign_pointer(inet->inet_opt, opt);
		if (old)
			kfree_rcu(old, rcu);
		break;
	}
	case IP_PKTINFO:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PKTINFO;
		else
			inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
		break;
	case IP_RECVTTL:
		if (val)
			inet->cmsg_flags |= IP_CMSG_TTL;
		else
			inet->cmsg_flags &= ~IP_CMSG_TTL;
		break;
	case IP_RECVTOS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_TOS;
		else
			inet->cmsg_flags &= ~IP_CMSG_TOS;
		break;
	case IP_RECVOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RECVOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
		break;
	case IP_RETOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RETOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
		break;
	case IP_PASSSEC:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PASSSEC;
		else
			inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
		break;
	case IP_RECVORIGDSTADDR:
		if (val)
			inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
		else
			inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
		break;
	case IP_CHECKSUM:
		if (val) {
			if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
				inet_inc_convert_csum(sk);
				inet->cmsg_flags |= IP_CMSG_CHECKSUM;
			}
		} else {
			if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
				inet_dec_convert_csum(sk);
				inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
			}
		}
		break;
	case IP_RECVFRAGSIZE:
		if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
			goto e_inval;
		if (val)
			inet->cmsg_flags |= IP_CMSG_RECVFRAGSIZE;
		else
			inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE;
		break;
	case IP_TOS:	/* This sets both TOS and Precedence */
		__ip_sock_set_tos(sk, val);
		break;
	case IP_TTL:
		if (optlen < 1)
			goto e_inval;
		if (val != -1 && (val < 1 || val > 255))
			goto e_inval;
		inet->uc_ttl = val;
		break;
	case IP_HDRINCL:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->hdrincl = val ? 1 : 0;
		break;
	case IP_NODEFRAG:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->nodefrag = val ? 1 : 0;
		break;
	case IP_BIND_ADDRESS_NO_PORT:
		inet->bind_address_no_port = val ? 1 : 0;
		break;
	case IP_MTU_DISCOVER:
		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
			goto e_inval;
		inet->pmtudisc = val;
		break;
	case IP_RECVERR:
		inet->recverr = !!val;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		break;
	case IP_RECVERR_RFC4884:
		if (val < 0 || val > 1)
			goto e_inval;
		inet->recverr_rfc4884 = !!val;
		break;
	case IP_MULTICAST_TTL:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (optlen < 1)
			goto e_inval;
		if (val == -1)
			val = 1;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->mc_ttl = val;
		break;
	case IP_MULTICAST_LOOP:
		if (optlen < 1)
			goto e_inval;
		inet->mc_loop = !!val;
		break;
	case IP_UNICAST_IF:
	{
		struct net_device *dev = NULL;
		int ifindex;
		int midx;

		if (optlen != sizeof(int))
			goto e_inval;

		ifindex = (__force int)ntohl((__force __be32)val);
		if (ifindex == 0) {
			inet->uc_index = 0;
			err = 0;
			break;
		}

		dev = dev_get_by_index(sock_net(sk), ifindex);
		err = -EADDRNOTAVAIL;
		if (!dev)
			break;

		midx = l3mdev_master_ifindex(dev);
		dev_put(dev);

		err = -EINVAL;
		if (sk->sk_bound_dev_if && midx != sk->sk_bound_dev_if)
			break;

		inet->uc_index = ifindex;
		err = 0;
		break;
	}
	case IP_MULTICAST_IF:
	{
		struct ip_mreqn mreq;
		struct net_device *dev = NULL;
		int midx;

		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		/*
		 *	Check the arguments are allowable
		 */

		if (optlen < sizeof(struct in_addr))
			goto e_inval;

		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (optlen >= sizeof(struct ip_mreq)) {
				if (copy_from_sockptr(&mreq, optval,
						      sizeof(struct ip_mreq)))
					break;
			} else if (optlen >= sizeof(struct in_addr)) {
				if (copy_from_sockptr(&mreq.imr_address, optval,
						      sizeof(struct in_addr)))
					break;
			}
		}

		if (!mreq.imr_ifindex) {
			if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
				inet->mc_index = 0;
				inet->mc_addr  = 0;
				err = 0;
				break;
			}
			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
			if (dev)
				mreq.imr_ifindex = dev->ifindex;
		} else
			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);


		err = -EADDRNOTAVAIL;
		if (!dev)
			break;

		midx = l3mdev_master_ifindex(dev);

		dev_put(dev);

		err = -EINVAL;
		if (sk->sk_bound_dev_if &&
		    mreq.imr_ifindex != sk->sk_bound_dev_if &&
		    midx != sk->sk_bound_dev_if)
			break;

		inet->mc_index = mreq.imr_ifindex;
		inet->mc_addr  = mreq.imr_address.s_addr;
		err = 0;
		break;
	}

	case IP_ADD_MEMBERSHIP:
	case IP_DROP_MEMBERSHIP:
	{
		struct ip_mreqn mreq;

		err = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		if (optlen < sizeof(struct ip_mreq))
			goto e_inval;
		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (copy_from_sockptr(&mreq, optval,
					      sizeof(struct ip_mreq)))
				break;
		}

		if (optname == IP_ADD_MEMBERSHIP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
	case IP_MSFILTER:
	{
		struct ip_msfilter *msf;

		if (optlen < IP_MSFILTER_SIZE(0))
			goto e_inval;
		if (optlen > READ_ONCE(sysctl_optmem_max)) {
			err = -ENOBUFS;
			break;
		}
		msf = memdup_sockptr(optval, optlen);
		if (IS_ERR(msf)) {
			err = PTR_ERR(msf);
			break;
		}
		/* numsrc >= (1G-4) overflow in 32 bits */
		if (msf->imsf_numsrc >= 0x3ffffffcU ||
		    msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
			kfree(msf);
			err = -ENOBUFS;
			break;
		}
		if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
			kfree(msf);
			err = -EINVAL;
			break;
		}
		err = ip_mc_msfilter(sk, msf, 0);
		kfree(msf);
		break;
	}
	case IP_BLOCK_SOURCE:
	case IP_UNBLOCK_SOURCE:
	case IP_ADD_SOURCE_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
	{
		struct ip_mreq_source mreqs;
		int omode, add;

		if (optlen != sizeof(struct ip_mreq_source))
			goto e_inval;
		if (copy_from_sockptr(&mreqs, optval, sizeof(mreqs))) {
			err = -EFAULT;
			break;
		}
		if (optname == IP_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == IP_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
			struct ip_mreqn mreq;

			mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
			mreq.imr_address.s_addr = mreqs.imr_interface;
			mreq.imr_ifindex = 0;
			err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
			if (err && err != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs, 0);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
		if (in_compat_syscall())
			err = compat_ip_mcast_join_leave(sk, optname, optval,
							 optlen);
		else
			err = ip_mcast_join_leave(sk, optname, optval, optlen);
		break;
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
		err = do_mcast_group_source(sk, optname, optval, optlen);
		break;
	case MCAST_MSFILTER:
		if (in_compat_syscall())
			err = compat_ip_set_mcast_msfilter(sk, optval, optlen);
		else
			err = ip_set_mcast_msfilter(sk, optval, optlen);
		break;
	case IP_MULTICAST_ALL:
		if (optlen < 1)
			goto e_inval;
		if (val != 0 && val != 1)
			goto e_inval;
		inet->mc_all = val;
		break;

	case IP_FREEBIND:
		if (optlen < 1)
			goto e_inval;
		inet->freebind = !!val;
		break;

	case IP_IPSEC_POLICY:
	case IP_XFRM_POLICY:
		err = -EPERM;
		if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			break;
		err = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IP_TRANSPARENT:
		if (!!val && !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (optlen < 1)
			goto e_inval;
		inet->transparent = !!val;
		break;

	case IP_MINTTL:
		if (optlen < 1)
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;

		if (val)
			static_branch_enable(&ip4_min_ttl);

		/* tcp_v4_err() and tcp_v4_rcv() might read min_ttl
		 * while we are changing it.
		 */
		WRITE_ONCE(inet->min_ttl, val);
		break;

	case IP_LOCAL_PORT_RANGE:
	{
		const __u16 lo = val;
		const __u16 hi = val >> 16;

		if (optlen != sizeof(__u32))
			goto e_inval;
		if (lo != 0 && hi != 0 && lo > hi)
			goto e_inval;

		inet->local_port_range.lo = lo;
		inet->local_port_range.hi = hi;
		break;
	}
	default:
		err = -ENOPROTOOPT;
		break;
	}
	sockopt_release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();
	return err;

e_inval:
	sockopt_release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();
	return -EINVAL;
}

/**
 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
 * @sk: socket
 * @skb: buffer
 *
 * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
 * destination in skb->cb[] before dst drop.
 * This way, the receiver doesn't take cache line misses to read the rtable.
 */
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
{
	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
	bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
		       ipv6_sk_rxinfo(sk);

	if (prepare && skb_rtable(skb)) {
		/* skb->cb is overloaded: prior to this point it is IP{6}CB
		 * which has interface index (iif) as the first member of the
		 * underlying inet{6}_skb_parm struct. This code then overlays
		 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
		 * element so the iif is picked up from the prior IPCB. If iif
		 * is the loopback interface, then return the sending interface
		 * (e.g., process binds socket to eth0 for Tx which is
		 * redirected to loopback in the rtable/dst).
		 */
		struct rtable *rt = skb_rtable(skb);
		bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);

		if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
			pktinfo->ipi_ifindex = inet_iif(skb);
		else if (l3slave && rt && rt->rt_iif)
			pktinfo->ipi_ifindex = rt->rt_iif;

		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
	} else {
		pktinfo->ipi_ifindex = 0;
		pktinfo->ipi_spec_dst.s_addr = 0;
	}
	skb_dst_drop(skb);
}

int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		  unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#if IS_ENABLED(CONFIG_BPFILTER_UMH)
	if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
	    optname < BPFILTER_IPT_SET_MAX)
		err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
#endif
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
	    optname != IP_IPSEC_POLICY &&
	    optname != IP_XFRM_POLICY &&
	    !ip_mroute_opt(optname))
		err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
#endif
	return err;
}
EXPORT_SYMBOL(ip_setsockopt);

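/*
 * Example (not part of the kernel sources): a hedged userspace sketch of the
 * set side handled by do_ip_setsockopt() above, assuming a datagram socket
 * "fd".  The values end up in the inet_sock fields written by the cases
 * above (inet->tos, inet->pmtudisc, inet->uc_ttl).  Error handling omitted.
 *
 *	int tos = 0x10;				// IPTOS_LOWDELAY
 *	setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
 *
 *	int pmtud = IP_PMTUDISC_DO;		// always set DF, rely on PMTUD
 *	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtud, sizeof(pmtud));
 *
 *	int ttl = 64;
 *	setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
 */
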
/*
 *	Get the options. Note for future reference. The GET of IP options gets
 *	the _received_ ones. The set sets the _sent_ ones.
 */

static bool getsockopt_needs_rtnl(int optname)
{
	switch (optname) {
	case IP_MSFILTER:
	case MCAST_MSFILTER:
		return true;
	}
	return false;
}

static int ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval,
				 sockptr_t optlen, int len)
{
	const int size0 = offsetof(struct group_filter, gf_slist_flex);
	struct group_filter gsf;
	int num, gsf_size;
	int err;

	if (len < size0)
		return -EINVAL;
	if (copy_from_sockptr(&gsf, optval, size0))
		return -EFAULT;

	num = gsf.gf_numsrc;
	err = ip_mc_gsfget(sk, &gsf, optval,
			   offsetof(struct group_filter, gf_slist_flex));
	if (err)
		return err;
	if (gsf.gf_numsrc < num)
		num = gsf.gf_numsrc;
	gsf_size = GROUP_FILTER_SIZE(num);
	if (copy_to_sockptr(optlen, &gsf_size, sizeof(int)) ||
	    copy_to_sockptr(optval, &gsf, size0))
		return -EFAULT;
	return 0;
}

static int compat_ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval,
					sockptr_t optlen, int len)
{
	const int size0 = offsetof(struct compat_group_filter, gf_slist_flex);
	struct compat_group_filter gf32;
	struct group_filter gf;
	int num;
	int err;

	if (len < size0)
		return -EINVAL;
	if (copy_from_sockptr(&gf32, optval, size0))
		return -EFAULT;

	gf.gf_interface = gf32.gf_interface;
	gf.gf_fmode = gf32.gf_fmode;
	num = gf.gf_numsrc = gf32.gf_numsrc;
	gf.gf_group = gf32.gf_group;

	err = ip_mc_gsfget(sk, &gf, optval,
			   offsetof(struct compat_group_filter, gf_slist_flex));
	if (err)
		return err;
	if (gf.gf_numsrc < num)
		num = gf.gf_numsrc;
	len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32));
	if (copy_to_sockptr(optlen, &len, sizeof(int)) ||
	    copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_fmode),
				   &gf.gf_fmode, sizeof(gf.gf_fmode)) ||
	    copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_numsrc),
				   &gf.gf_numsrc, sizeof(gf.gf_numsrc)))
		return -EFAULT;
	return 0;
}

int do_ip_getsockopt(struct sock *sk, int level, int optname,
		     sockptr_t optval, sockptr_t optlen)
{
	struct inet_sock *inet = inet_sk(sk);
	bool needs_rtnl = getsockopt_needs_rtnl(optname);
	int val, err = 0;
	int len;

	if (level != SOL_IP)
		return -EOPNOTSUPP;

	if (ip_mroute_opt(optname))
		return ip_mroute_getsockopt(sk, optname, optval, optlen);

	if (copy_from_sockptr(&len, optlen, sizeof(int)))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	if (needs_rtnl)
		rtnl_lock();
	sockopt_lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		unsigned char optbuf[sizeof(struct ip_options)+40];
		struct ip_options *opt = (struct ip_options *)optbuf;
		struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference_protected(inet->inet_opt,
						     lockdep_sock_is_held(sk));
		opt->optlen = 0;
		if (inet_opt)
			memcpy(optbuf, &inet_opt->opt,
			       sizeof(struct ip_options) +
			       inet_opt->opt.optlen);
		sockopt_release_sock(sk);

		if (opt->optlen == 0) {
			len = 0;
			return copy_to_sockptr(optlen, &len, sizeof(int));
		}

		ip_options_undo(opt);

		len = min_t(unsigned int, len, opt->optlen);
		if (copy_to_sockptr(optlen, &len, sizeof(int)))
			return -EFAULT;
		if (copy_to_sockptr(optval, opt->__data, len))
			return -EFAULT;
		return 0;
	}
	case IP_PKTINFO:
		val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
		break;
	case IP_RECVTTL:
		val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
		break;
	case IP_RECVTOS:
		val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
		break;
	case IP_RECVOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
		break;
	case IP_RETOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
		break;
	case IP_PASSSEC:
		val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
		break;
	case IP_RECVORIGDSTADDR:
		val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
		break;
	case IP_CHECKSUM:
		val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
		break;
	case IP_RECVFRAGSIZE:
		val = (inet->cmsg_flags & IP_CMSG_RECVFRAGSIZE) != 0;
		break;
	case IP_TOS:
		val = inet->tos;
		break;
	case IP_TTL:
	{
		struct net *net = sock_net(sk);
		val = (inet->uc_ttl == -1 ?
		       READ_ONCE(net->ipv4.sysctl_ip_default_ttl) :
		       inet->uc_ttl);
		break;
	}
	case IP_HDRINCL:
		val = inet->hdrincl;
		break;
	case IP_NODEFRAG:
		val = inet->nodefrag;
		break;
	case IP_BIND_ADDRESS_NO_PORT:
		val = inet->bind_address_no_port;
		break;
	case IP_MTU_DISCOVER:
		val = inet->pmtudisc;
		break;
	case IP_MTU:
	{
		struct dst_entry *dst;
		val = 0;
		dst = sk_dst_get(sk);
		if (dst) {
			val = dst_mtu(dst);
			dst_release(dst);
		}
		if (!val) {
			sockopt_release_sock(sk);
			return -ENOTCONN;
		}
		break;
	}
	case IP_RECVERR:
		val = inet->recverr;
		break;
	case IP_RECVERR_RFC4884:
		val = inet->recverr_rfc4884;
		break;
	case IP_MULTICAST_TTL:
		val = inet->mc_ttl;
		break;
	case IP_MULTICAST_LOOP:
		val = inet->mc_loop;
		break;
	case IP_UNICAST_IF:
		val = (__force int)htonl((__u32) inet->uc_index);
		break;
	case IP_MULTICAST_IF:
	{
		struct in_addr addr;
		len = min_t(unsigned int, len, sizeof(struct in_addr));
		addr.s_addr = inet->mc_addr;
		sockopt_release_sock(sk);

		if (copy_to_sockptr(optlen, &len, sizeof(int)))
			return -EFAULT;
		if (copy_to_sockptr(optval, &addr, len))
			return -EFAULT;
		return 0;
	}
	case IP_MSFILTER:
	{
		struct ip_msfilter msf;

		if (len < IP_MSFILTER_SIZE(0)) {
			err = -EINVAL;
			goto out;
		}
		if (copy_from_sockptr(&msf, optval, IP_MSFILTER_SIZE(0))) {
			err = -EFAULT;
			goto out;
		}
		err = ip_mc_msfget(sk, &msf, optval, optlen);
		goto out;
	}
	case MCAST_MSFILTER:
		if (in_compat_syscall())
			err = compat_ip_get_mcast_msfilter(sk, optval, optlen,
							   len);
		else
			err = ip_get_mcast_msfilter(sk, optval, optlen, len);
		goto out;
	case IP_MULTICAST_ALL:
		val = inet->mc_all;
		break;
	case IP_PKTOPTIONS:
	{
		struct msghdr msg;

		sockopt_release_sock(sk);

		if (sk->sk_type != SOCK_STREAM)
			return -ENOPROTOOPT;

		if (optval.is_kernel) {
			msg.msg_control_is_user = false;
			msg.msg_control = optval.kernel;
		} else {
			msg.msg_control_is_user = true;
			msg.msg_control_user = optval.user;
		}
		msg.msg_controllen = len;
		msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;

		if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
			struct in_pktinfo info;

			info.ipi_addr.s_addr = inet->inet_rcv_saddr;
			info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
			info.ipi_ifindex = inet->mc_index;
			put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
		}
		if (inet->cmsg_flags & IP_CMSG_TTL) {
			int hlim = inet->mc_ttl;
			put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
		}
		if (inet->cmsg_flags & IP_CMSG_TOS) {
			int tos = inet->rcv_tos;
			put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
		}
		len -= msg.msg_controllen;
		return copy_to_sockptr(optlen, &len, sizeof(int));
	}
	case IP_FREEBIND:
		val = inet->freebind;
		break;
	case IP_TRANSPARENT:
		val = inet->transparent;
		break;
	case IP_MINTTL:
		val = inet->min_ttl;
		break;
	case IP_LOCAL_PORT_RANGE:
		val = inet->local_port_range.hi << 16 | inet->local_port_range.lo;
		break;
	case IP_PROTOCOL:
		val = inet_sk(sk)->inet_num;
		break;
	default:
		sockopt_release_sock(sk);
		return -ENOPROTOOPT;
	}
	sockopt_release_sock(sk);

	if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
		unsigned char ucval = (unsigned char)val;
		len = 1;
		if (copy_to_sockptr(optlen, &len, sizeof(int)))
			return -EFAULT;
		if (copy_to_sockptr(optval, &ucval, 1))
			return -EFAULT;
	} else {
		len = min_t(unsigned int, sizeof(int), len);
		if (copy_to_sockptr(optlen, &len, sizeof(int)))
			return -EFAULT;
		if (copy_to_sockptr(optval, &val, len))
			return -EFAULT;
	}
	return 0;

out:
	sockopt_release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();
	return err;
}

int ip_getsockopt(struct sock *sk, int level,
		  int optname, char __user *optval, int __user *optlen)
{
	int err;

	err = do_ip_getsockopt(sk, level, optname,
			       USER_SOCKPTR(optval), USER_SOCKPTR(optlen));

#if IS_ENABLED(CONFIG_BPFILTER_UMH)
	if (optname >= BPFILTER_IPT_SO_GET_INFO &&
	    optname < BPFILTER_IPT_GET_MAX)
		err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
#endif
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
	    !ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(ip_getsockopt);
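
/*
 * Example (not part of the kernel sources): a hedged userspace sketch of the
 * get side above.  IP_MTU only has a value once the socket has a cached
 * route, which is why the IP_MTU case returns -ENOTCONN otherwise; a
 * connect()ed UDP socket "fd" is assumed here.
 *
 *	int mtu;
 *	socklen_t len = sizeof(mtu);
 *
 *	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
 *		printf("path MTU toward peer: %d\n", mtu);
 *	else
 *		perror("IP_MTU");	// ENOTCONN until a route is cached
 */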