// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 Address [auto]configuration
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 */

/*
 *	Changes:
 *
 *	Janos Farkas			:	delete timer on ifdown
 *	<chexum@bankinf.banki.hu>
 *	Andi Kleen			:	kill double kfree on module
 *						unload.
 *	Maciej W. Rozycki		:	FDDI support
 *	sekiya@USAGI			:	Don't send too many RS
 *						packets.
 *	yoshfuji@USAGI			:	Fixed interval between DAD
 *						packets.
 *	YOSHIFUJI Hideaki @USAGI	:	improved accuracy of
 *						address validation timer.
 *	YOSHIFUJI Hideaki @USAGI	:	Privacy Extensions (RFC3041)
 *						support.
 *	Yuji SEKIYA @USAGI		:	Don't assign a same IPv6
 *						address on a same interface.
 *	YOSHIFUJI Hideaki @USAGI	:	ARCnet support
 *	YOSHIFUJI Hideaki @USAGI	:	convert /proc/net/if_inet6 to
 *						seq_file.
 *	YOSHIFUJI Hideaki @USAGI	:	improved source address
 *						selection; consider scope,
 *						status etc.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/if_arcnet.h>
#include <linux/if_infiniband.h>
#include <linux/route.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/string.h>
#include <linux/hash.h>

#include <net/ip_tunnels.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/6lowpan.h>
#include <net/firewire.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/l3mdev.h>
#include <linux/if_tunnel.h>
#include <linux/rtnetlink.h>
#include <linux/netconf.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/ioam6.h>

#define	INFINITY_LIFE_TIME	0xFFFFFFFF

#define IPV6_MAX_STRLEN \
	sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")

static inline u32 cstamp_delta(unsigned long cstamp)
{
	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
}

static inline s32 rfc3315_s14_backoff_init(s32 irt)
{
	/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
	u64 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)irt;
	do_div(tmp, 1000000);
	return (s32)tmp;
}

static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
{
	/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
	u64 tmp = get_random_u32_inclusive(1900000, 2100000) * (u64)rt;
	do_div(tmp, 1000000);
	if ((s32)tmp > mrt) {
		/* multiply 'maximum retransmission time' by 0.9 ..
1.1 */ 120 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)mrt; 121 do_div(tmp, 1000000); 122 } 123 return (s32)tmp; 124 } 125 126 #ifdef CONFIG_SYSCTL 127 static int addrconf_sysctl_register(struct inet6_dev *idev); 128 static void addrconf_sysctl_unregister(struct inet6_dev *idev); 129 #else 130 static inline int addrconf_sysctl_register(struct inet6_dev *idev) 131 { 132 return 0; 133 } 134 135 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev) 136 { 137 } 138 #endif 139 140 static void ipv6_gen_rnd_iid(struct in6_addr *addr); 141 142 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev); 143 static int ipv6_count_addresses(const struct inet6_dev *idev); 144 static int ipv6_generate_stable_address(struct in6_addr *addr, 145 u8 dad_count, 146 const struct inet6_dev *idev); 147 148 #define IN6_ADDR_HSIZE_SHIFT 8 149 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT) 150 151 static void addrconf_verify(struct net *net); 152 static void addrconf_verify_rtnl(struct net *net); 153 154 static struct workqueue_struct *addrconf_wq; 155 156 static void addrconf_join_anycast(struct inet6_ifaddr *ifp); 157 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); 158 159 static void addrconf_type_change(struct net_device *dev, 160 unsigned long event); 161 static int addrconf_ifdown(struct net_device *dev, bool unregister); 162 163 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, 164 int plen, 165 const struct net_device *dev, 166 u32 flags, u32 noflags, 167 bool no_gw); 168 169 static void addrconf_dad_start(struct inet6_ifaddr *ifp); 170 static void addrconf_dad_work(struct work_struct *w); 171 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, 172 bool send_na); 173 static void addrconf_dad_run(struct inet6_dev *idev, bool restart); 174 static void addrconf_rs_timer(struct timer_list *t); 175 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); 176 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); 177 178 static void inet6_prefix_notify(int event, struct inet6_dev *idev, 179 struct prefix_info *pinfo); 180 181 static struct ipv6_devconf ipv6_devconf __read_mostly = { 182 .forwarding = 0, 183 .hop_limit = IPV6_DEFAULT_HOPLIMIT, 184 .mtu6 = IPV6_MIN_MTU, 185 .accept_ra = 1, 186 .accept_redirects = 1, 187 .autoconf = 1, 188 .force_mld_version = 0, 189 .mldv1_unsolicited_report_interval = 10 * HZ, 190 .mldv2_unsolicited_report_interval = HZ, 191 .dad_transmits = 1, 192 .rtr_solicits = MAX_RTR_SOLICITATIONS, 193 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL, 194 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL, 195 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY, 196 .use_tempaddr = 0, 197 .temp_valid_lft = TEMP_VALID_LIFETIME, 198 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME, 199 .regen_min_advance = REGEN_MIN_ADVANCE, 200 .regen_max_retry = REGEN_MAX_RETRY, 201 .max_desync_factor = MAX_DESYNC_FACTOR, 202 .max_addresses = IPV6_MAX_ADDRESSES, 203 .accept_ra_defrtr = 1, 204 .ra_defrtr_metric = IP6_RT_PRIO_USER, 205 .accept_ra_from_local = 0, 206 .accept_ra_min_hop_limit= 1, 207 .accept_ra_min_lft = 0, 208 .accept_ra_pinfo = 1, 209 #ifdef CONFIG_IPV6_ROUTER_PREF 210 .accept_ra_rtr_pref = 1, 211 .rtr_probe_interval = 60 * HZ, 212 #ifdef CONFIG_IPV6_ROUTE_INFO 213 .accept_ra_rt_info_min_plen = 0, 214 .accept_ra_rt_info_max_plen = 0, 215 #endif 216 #endif 217 .proxy_ndp = 0, 218 .accept_source_route = 0, /* we do not accept RH0 by default. 
*/ 219 .disable_ipv6 = 0, 220 .accept_dad = 0, 221 .suppress_frag_ndisc = 1, 222 .accept_ra_mtu = 1, 223 .stable_secret = { 224 .initialized = false, 225 }, 226 .use_oif_addrs_only = 0, 227 .ignore_routes_with_linkdown = 0, 228 .keep_addr_on_down = 0, 229 .seg6_enabled = 0, 230 #ifdef CONFIG_IPV6_SEG6_HMAC 231 .seg6_require_hmac = 0, 232 #endif 233 .enhanced_dad = 1, 234 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64, 235 .disable_policy = 0, 236 .rpl_seg_enabled = 0, 237 .ioam6_enabled = 0, 238 .ioam6_id = IOAM6_DEFAULT_IF_ID, 239 .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE, 240 .ndisc_evict_nocarrier = 1, 241 .ra_honor_pio_life = 0, 242 }; 243 244 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { 245 .forwarding = 0, 246 .hop_limit = IPV6_DEFAULT_HOPLIMIT, 247 .mtu6 = IPV6_MIN_MTU, 248 .accept_ra = 1, 249 .accept_redirects = 1, 250 .autoconf = 1, 251 .force_mld_version = 0, 252 .mldv1_unsolicited_report_interval = 10 * HZ, 253 .mldv2_unsolicited_report_interval = HZ, 254 .dad_transmits = 1, 255 .rtr_solicits = MAX_RTR_SOLICITATIONS, 256 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL, 257 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL, 258 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY, 259 .use_tempaddr = 0, 260 .temp_valid_lft = TEMP_VALID_LIFETIME, 261 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME, 262 .regen_min_advance = REGEN_MIN_ADVANCE, 263 .regen_max_retry = REGEN_MAX_RETRY, 264 .max_desync_factor = MAX_DESYNC_FACTOR, 265 .max_addresses = IPV6_MAX_ADDRESSES, 266 .accept_ra_defrtr = 1, 267 .ra_defrtr_metric = IP6_RT_PRIO_USER, 268 .accept_ra_from_local = 0, 269 .accept_ra_min_hop_limit= 1, 270 .accept_ra_min_lft = 0, 271 .accept_ra_pinfo = 1, 272 #ifdef CONFIG_IPV6_ROUTER_PREF 273 .accept_ra_rtr_pref = 1, 274 .rtr_probe_interval = 60 * HZ, 275 #ifdef CONFIG_IPV6_ROUTE_INFO 276 .accept_ra_rt_info_min_plen = 0, 277 .accept_ra_rt_info_max_plen = 0, 278 #endif 279 #endif 280 .proxy_ndp = 0, 281 .accept_source_route = 0, /* we do not accept RH0 by default. 
*/ 282 .disable_ipv6 = 0, 283 .accept_dad = 1, 284 .suppress_frag_ndisc = 1, 285 .accept_ra_mtu = 1, 286 .stable_secret = { 287 .initialized = false, 288 }, 289 .use_oif_addrs_only = 0, 290 .ignore_routes_with_linkdown = 0, 291 .keep_addr_on_down = 0, 292 .seg6_enabled = 0, 293 #ifdef CONFIG_IPV6_SEG6_HMAC 294 .seg6_require_hmac = 0, 295 #endif 296 .enhanced_dad = 1, 297 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64, 298 .disable_policy = 0, 299 .rpl_seg_enabled = 0, 300 .ioam6_enabled = 0, 301 .ioam6_id = IOAM6_DEFAULT_IF_ID, 302 .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE, 303 .ndisc_evict_nocarrier = 1, 304 .ra_honor_pio_life = 0, 305 }; 306 307 /* Check if link is ready: is it up and is a valid qdisc available */ 308 static inline bool addrconf_link_ready(const struct net_device *dev) 309 { 310 return netif_oper_up(dev) && !qdisc_tx_is_noop(dev); 311 } 312 313 static void addrconf_del_rs_timer(struct inet6_dev *idev) 314 { 315 if (del_timer(&idev->rs_timer)) 316 __in6_dev_put(idev); 317 } 318 319 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp) 320 { 321 if (cancel_delayed_work(&ifp->dad_work)) 322 __in6_ifa_put(ifp); 323 } 324 325 static void addrconf_mod_rs_timer(struct inet6_dev *idev, 326 unsigned long when) 327 { 328 if (!mod_timer(&idev->rs_timer, jiffies + when)) 329 in6_dev_hold(idev); 330 } 331 332 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, 333 unsigned long delay) 334 { 335 in6_ifa_hold(ifp); 336 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay)) 337 in6_ifa_put(ifp); 338 } 339 340 static int snmp6_alloc_dev(struct inet6_dev *idev) 341 { 342 int i; 343 344 idev->stats.ipv6 = alloc_percpu_gfp(struct ipstats_mib, GFP_KERNEL_ACCOUNT); 345 if (!idev->stats.ipv6) 346 goto err_ip; 347 348 for_each_possible_cpu(i) { 349 struct ipstats_mib *addrconf_stats; 350 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i); 351 u64_stats_init(&addrconf_stats->syncp); 352 } 353 354 355 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device), 356 GFP_KERNEL); 357 if (!idev->stats.icmpv6dev) 358 goto err_icmp; 359 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device), 360 GFP_KERNEL_ACCOUNT); 361 if (!idev->stats.icmpv6msgdev) 362 goto err_icmpmsg; 363 364 return 0; 365 366 err_icmpmsg: 367 kfree(idev->stats.icmpv6dev); 368 err_icmp: 369 free_percpu(idev->stats.ipv6); 370 err_ip: 371 return -ENOMEM; 372 } 373 374 static struct inet6_dev *ipv6_add_dev(struct net_device *dev) 375 { 376 struct inet6_dev *ndev; 377 int err = -ENOMEM; 378 379 ASSERT_RTNL(); 380 381 if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev) 382 return ERR_PTR(-EINVAL); 383 384 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL_ACCOUNT); 385 if (!ndev) 386 return ERR_PTR(err); 387 388 rwlock_init(&ndev->lock); 389 ndev->dev = dev; 390 INIT_LIST_HEAD(&ndev->addr_list); 391 timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0); 392 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); 393 394 if (ndev->cnf.stable_secret.initialized) 395 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; 396 397 ndev->cnf.mtu6 = dev->mtu; 398 ndev->ra_mtu = 0; 399 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); 400 if (!ndev->nd_parms) { 401 kfree(ndev); 402 return ERR_PTR(err); 403 } 404 if (ndev->cnf.forwarding) 405 dev_disable_lro(dev); 406 /* We refer to the device */ 407 netdev_hold(dev, &ndev->dev_tracker, GFP_KERNEL); 408 409 if (snmp6_alloc_dev(ndev) < 0) { 410 netdev_dbg(dev, "%s: cannot allocate memory for statistics\n", 411 __func__); 412 neigh_parms_release(&nd_tbl, 
ndev->nd_parms); 413 netdev_put(dev, &ndev->dev_tracker); 414 kfree(ndev); 415 return ERR_PTR(err); 416 } 417 418 if (dev != blackhole_netdev) { 419 if (snmp6_register_dev(ndev) < 0) { 420 netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n", 421 __func__, dev->name); 422 goto err_release; 423 } 424 } 425 /* One reference from device. */ 426 refcount_set(&ndev->refcnt, 1); 427 428 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) 429 ndev->cnf.accept_dad = -1; 430 431 #if IS_ENABLED(CONFIG_IPV6_SIT) 432 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { 433 pr_info("%s: Disabled Multicast RS\n", dev->name); 434 ndev->cnf.rtr_solicits = 0; 435 } 436 #endif 437 438 INIT_LIST_HEAD(&ndev->tempaddr_list); 439 ndev->desync_factor = U32_MAX; 440 if ((dev->flags&IFF_LOOPBACK) || 441 dev->type == ARPHRD_TUNNEL || 442 dev->type == ARPHRD_TUNNEL6 || 443 dev->type == ARPHRD_SIT || 444 dev->type == ARPHRD_NONE) { 445 ndev->cnf.use_tempaddr = -1; 446 } 447 448 ndev->token = in6addr_any; 449 450 if (netif_running(dev) && addrconf_link_ready(dev)) 451 ndev->if_flags |= IF_READY; 452 453 ipv6_mc_init_dev(ndev); 454 ndev->tstamp = jiffies; 455 if (dev != blackhole_netdev) { 456 err = addrconf_sysctl_register(ndev); 457 if (err) { 458 ipv6_mc_destroy_dev(ndev); 459 snmp6_unregister_dev(ndev); 460 goto err_release; 461 } 462 } 463 /* protected by rtnl_lock */ 464 rcu_assign_pointer(dev->ip6_ptr, ndev); 465 466 if (dev != blackhole_netdev) { 467 /* Join interface-local all-node multicast group */ 468 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes); 469 470 /* Join all-node multicast group */ 471 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); 472 473 /* Join all-router multicast group if forwarding is set */ 474 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST)) 475 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); 476 } 477 return ndev; 478 479 err_release: 480 neigh_parms_release(&nd_tbl, ndev->nd_parms); 481 ndev->dead = 1; 482 in6_dev_finish_destroy(ndev); 483 return ERR_PTR(err); 484 } 485 486 static struct inet6_dev *ipv6_find_idev(struct net_device *dev) 487 { 488 struct inet6_dev *idev; 489 490 ASSERT_RTNL(); 491 492 idev = __in6_dev_get(dev); 493 if (!idev) { 494 idev = ipv6_add_dev(dev); 495 if (IS_ERR(idev)) 496 return idev; 497 } 498 499 if (dev->flags&IFF_UP) 500 ipv6_mc_up(idev); 501 return idev; 502 } 503 504 static int inet6_netconf_msgsize_devconf(int type) 505 { 506 int size = NLMSG_ALIGN(sizeof(struct netconfmsg)) 507 + nla_total_size(4); /* NETCONFA_IFINDEX */ 508 bool all = false; 509 510 if (type == NETCONFA_ALL) 511 all = true; 512 513 if (all || type == NETCONFA_FORWARDING) 514 size += nla_total_size(4); 515 #ifdef CONFIG_IPV6_MROUTE 516 if (all || type == NETCONFA_MC_FORWARDING) 517 size += nla_total_size(4); 518 #endif 519 if (all || type == NETCONFA_PROXY_NEIGH) 520 size += nla_total_size(4); 521 522 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) 523 size += nla_total_size(4); 524 525 return size; 526 } 527 528 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, 529 struct ipv6_devconf *devconf, u32 portid, 530 u32 seq, int event, unsigned int flags, 531 int type) 532 { 533 struct nlmsghdr *nlh; 534 struct netconfmsg *ncm; 535 bool all = false; 536 537 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), 538 flags); 539 if (!nlh) 540 return -EMSGSIZE; 541 542 if (type == NETCONFA_ALL) 543 all = true; 544 545 ncm = nlmsg_data(nlh); 546 ncm->ncm_family = AF_INET6; 547 548 if (nla_put_s32(skb, 
NETCONFA_IFINDEX, ifindex) < 0) 549 goto nla_put_failure; 550 551 if (!devconf) 552 goto out; 553 554 if ((all || type == NETCONFA_FORWARDING) && 555 nla_put_s32(skb, NETCONFA_FORWARDING, 556 READ_ONCE(devconf->forwarding)) < 0) 557 goto nla_put_failure; 558 #ifdef CONFIG_IPV6_MROUTE 559 if ((all || type == NETCONFA_MC_FORWARDING) && 560 nla_put_s32(skb, NETCONFA_MC_FORWARDING, 561 atomic_read(&devconf->mc_forwarding)) < 0) 562 goto nla_put_failure; 563 #endif 564 if ((all || type == NETCONFA_PROXY_NEIGH) && 565 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, 566 READ_ONCE(devconf->proxy_ndp)) < 0) 567 goto nla_put_failure; 568 569 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) && 570 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, 571 READ_ONCE(devconf->ignore_routes_with_linkdown)) < 0) 572 goto nla_put_failure; 573 574 out: 575 nlmsg_end(skb, nlh); 576 return 0; 577 578 nla_put_failure: 579 nlmsg_cancel(skb, nlh); 580 return -EMSGSIZE; 581 } 582 583 void inet6_netconf_notify_devconf(struct net *net, int event, int type, 584 int ifindex, struct ipv6_devconf *devconf) 585 { 586 struct sk_buff *skb; 587 int err = -ENOBUFS; 588 589 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL); 590 if (!skb) 591 goto errout; 592 593 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0, 594 event, 0, type); 595 if (err < 0) { 596 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */ 597 WARN_ON(err == -EMSGSIZE); 598 kfree_skb(skb); 599 goto errout; 600 } 601 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL); 602 return; 603 errout: 604 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err); 605 } 606 607 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = { 608 [NETCONFA_IFINDEX] = { .len = sizeof(int) }, 609 [NETCONFA_FORWARDING] = { .len = sizeof(int) }, 610 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) }, 611 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) }, 612 }; 613 614 static int inet6_netconf_valid_get_req(struct sk_buff *skb, 615 const struct nlmsghdr *nlh, 616 struct nlattr **tb, 617 struct netlink_ext_ack *extack) 618 { 619 int i, err; 620 621 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) { 622 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request"); 623 return -EINVAL; 624 } 625 626 if (!netlink_strict_get_check(skb)) 627 return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg), 628 tb, NETCONFA_MAX, 629 devconf_ipv6_policy, extack); 630 631 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg), 632 tb, NETCONFA_MAX, 633 devconf_ipv6_policy, extack); 634 if (err) 635 return err; 636 637 for (i = 0; i <= NETCONFA_MAX; i++) { 638 if (!tb[i]) 639 continue; 640 641 switch (i) { 642 case NETCONFA_IFINDEX: 643 break; 644 default: 645 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request"); 646 return -EINVAL; 647 } 648 } 649 650 return 0; 651 } 652 653 static int inet6_netconf_get_devconf(struct sk_buff *in_skb, 654 struct nlmsghdr *nlh, 655 struct netlink_ext_ack *extack) 656 { 657 struct net *net = sock_net(in_skb->sk); 658 struct nlattr *tb[NETCONFA_MAX+1]; 659 struct inet6_dev *in6_dev = NULL; 660 struct net_device *dev = NULL; 661 struct sk_buff *skb; 662 struct ipv6_devconf *devconf; 663 int ifindex; 664 int err; 665 666 err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack); 667 if (err < 0) 668 return err; 669 670 if (!tb[NETCONFA_IFINDEX]) 671 return -EINVAL; 672 673 err = -EINVAL; 674 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]); 675 
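/* Editor's sketch (illustrative only, not part of addrconf.c): the
 * WARN_ON(err == -EMSGSIZE) checks in this file hold because
 * inet6_netconf_msgsize_devconf() budgets one 4-byte (s32) netlink
 * attribute for every knob inet6_netconf_fill_devconf() can emit.
 * The hypothetical helper below spells out that arithmetic for a
 * NETCONFA_ALL reply with CONFIG_IPV6_MROUTE enabled.
 */
static int example_netconf_all_msgsize(void)
{
	int size = NLMSG_ALIGN(sizeof(struct netconfmsg));

	size += nla_total_size(4);	/* NETCONFA_IFINDEX */
	size += nla_total_size(4);	/* NETCONFA_FORWARDING */
	size += nla_total_size(4);	/* NETCONFA_MC_FORWARDING */
	size += nla_total_size(4);	/* NETCONFA_PROXY_NEIGH */
	size += nla_total_size(4);	/* NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN */

	return size;	/* must cover everything the fill function puts in */
}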
switch (ifindex) { 676 case NETCONFA_IFINDEX_ALL: 677 devconf = net->ipv6.devconf_all; 678 break; 679 case NETCONFA_IFINDEX_DEFAULT: 680 devconf = net->ipv6.devconf_dflt; 681 break; 682 default: 683 dev = dev_get_by_index(net, ifindex); 684 if (!dev) 685 return -EINVAL; 686 in6_dev = in6_dev_get(dev); 687 if (!in6_dev) 688 goto errout; 689 devconf = &in6_dev->cnf; 690 break; 691 } 692 693 err = -ENOBUFS; 694 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL); 695 if (!skb) 696 goto errout; 697 698 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 699 NETLINK_CB(in_skb).portid, 700 nlh->nlmsg_seq, RTM_NEWNETCONF, 0, 701 NETCONFA_ALL); 702 if (err < 0) { 703 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */ 704 WARN_ON(err == -EMSGSIZE); 705 kfree_skb(skb); 706 goto errout; 707 } 708 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 709 errout: 710 if (in6_dev) 711 in6_dev_put(in6_dev); 712 dev_put(dev); 713 return err; 714 } 715 716 /* Combine dev_addr_genid and dev_base_seq to detect changes. 717 */ 718 static u32 inet6_base_seq(const struct net *net) 719 { 720 u32 res = atomic_read(&net->ipv6.dev_addr_genid) + 721 READ_ONCE(net->dev_base_seq); 722 723 /* Must not return 0 (see nl_dump_check_consistent()). 724 * Chose a value far away from 0. 725 */ 726 if (!res) 727 res = 0x80000000; 728 return res; 729 } 730 731 static int inet6_netconf_dump_devconf(struct sk_buff *skb, 732 struct netlink_callback *cb) 733 { 734 const struct nlmsghdr *nlh = cb->nlh; 735 struct net *net = sock_net(skb->sk); 736 struct { 737 unsigned long ifindex; 738 unsigned int all_default; 739 } *ctx = (void *)cb->ctx; 740 struct net_device *dev; 741 struct inet6_dev *idev; 742 int err = 0; 743 744 if (cb->strict_check) { 745 struct netlink_ext_ack *extack = cb->extack; 746 struct netconfmsg *ncm; 747 748 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) { 749 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request"); 750 return -EINVAL; 751 } 752 753 if (nlmsg_attrlen(nlh, sizeof(*ncm))) { 754 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request"); 755 return -EINVAL; 756 } 757 } 758 759 rcu_read_lock(); 760 for_each_netdev_dump(net, dev, ctx->ifindex) { 761 idev = __in6_dev_get(dev); 762 if (!idev) 763 continue; 764 err = inet6_netconf_fill_devconf(skb, dev->ifindex, 765 &idev->cnf, 766 NETLINK_CB(cb->skb).portid, 767 nlh->nlmsg_seq, 768 RTM_NEWNETCONF, 769 NLM_F_MULTI, 770 NETCONFA_ALL); 771 if (err < 0) 772 goto done; 773 } 774 if (ctx->all_default == 0) { 775 err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, 776 net->ipv6.devconf_all, 777 NETLINK_CB(cb->skb).portid, 778 nlh->nlmsg_seq, 779 RTM_NEWNETCONF, NLM_F_MULTI, 780 NETCONFA_ALL); 781 if (err < 0) 782 goto done; 783 ctx->all_default++; 784 } 785 if (ctx->all_default == 1) { 786 err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, 787 net->ipv6.devconf_dflt, 788 NETLINK_CB(cb->skb).portid, 789 nlh->nlmsg_seq, 790 RTM_NEWNETCONF, NLM_F_MULTI, 791 NETCONFA_ALL); 792 if (err < 0) 793 goto done; 794 ctx->all_default++; 795 } 796 done: 797 rcu_read_unlock(); 798 return err; 799 } 800 801 #ifdef CONFIG_SYSCTL 802 static void dev_forward_change(struct inet6_dev *idev) 803 { 804 struct net_device *dev; 805 struct inet6_ifaddr *ifa; 806 LIST_HEAD(tmp_addr_list); 807 808 if (!idev) 809 return; 810 dev = idev->dev; 811 if (idev->cnf.forwarding) 812 dev_disable_lro(dev); 813 if (dev->flags & IFF_MULTICAST) { 814 if (idev->cnf.forwarding) { 815 ipv6_dev_mc_inc(dev, 
&in6addr_linklocal_allrouters); 816 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters); 817 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters); 818 } else { 819 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); 820 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters); 821 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters); 822 } 823 } 824 825 read_lock_bh(&idev->lock); 826 list_for_each_entry(ifa, &idev->addr_list, if_list) { 827 if (ifa->flags&IFA_F_TENTATIVE) 828 continue; 829 list_add_tail(&ifa->if_list_aux, &tmp_addr_list); 830 } 831 read_unlock_bh(&idev->lock); 832 833 while (!list_empty(&tmp_addr_list)) { 834 ifa = list_first_entry(&tmp_addr_list, 835 struct inet6_ifaddr, if_list_aux); 836 list_del(&ifa->if_list_aux); 837 if (idev->cnf.forwarding) 838 addrconf_join_anycast(ifa); 839 else 840 addrconf_leave_anycast(ifa); 841 } 842 843 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, 844 NETCONFA_FORWARDING, 845 dev->ifindex, &idev->cnf); 846 } 847 848 849 static void addrconf_forward_change(struct net *net, __s32 newf) 850 { 851 struct net_device *dev; 852 struct inet6_dev *idev; 853 854 for_each_netdev(net, dev) { 855 idev = __in6_dev_get(dev); 856 if (idev) { 857 int changed = (!idev->cnf.forwarding) ^ (!newf); 858 859 WRITE_ONCE(idev->cnf.forwarding, newf); 860 if (changed) 861 dev_forward_change(idev); 862 } 863 } 864 } 865 866 static int addrconf_fixup_forwarding(const struct ctl_table *table, int *p, int newf) 867 { 868 struct net *net; 869 int old; 870 871 if (!rtnl_trylock()) 872 return restart_syscall(); 873 874 net = (struct net *)table->extra2; 875 old = *p; 876 WRITE_ONCE(*p, newf); 877 878 if (p == &net->ipv6.devconf_dflt->forwarding) { 879 if ((!newf) ^ (!old)) 880 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, 881 NETCONFA_FORWARDING, 882 NETCONFA_IFINDEX_DEFAULT, 883 net->ipv6.devconf_dflt); 884 rtnl_unlock(); 885 return 0; 886 } 887 888 if (p == &net->ipv6.devconf_all->forwarding) { 889 int old_dflt = net->ipv6.devconf_dflt->forwarding; 890 891 WRITE_ONCE(net->ipv6.devconf_dflt->forwarding, newf); 892 if ((!newf) ^ (!old_dflt)) 893 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, 894 NETCONFA_FORWARDING, 895 NETCONFA_IFINDEX_DEFAULT, 896 net->ipv6.devconf_dflt); 897 898 addrconf_forward_change(net, newf); 899 if ((!newf) ^ (!old)) 900 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, 901 NETCONFA_FORWARDING, 902 NETCONFA_IFINDEX_ALL, 903 net->ipv6.devconf_all); 904 } else if ((!newf) ^ (!old)) 905 dev_forward_change((struct inet6_dev *)table->extra1); 906 rtnl_unlock(); 907 908 if (newf) 909 rt6_purge_dflt_routers(net); 910 return 1; 911 } 912 913 static void addrconf_linkdown_change(struct net *net, __s32 newf) 914 { 915 struct net_device *dev; 916 struct inet6_dev *idev; 917 918 for_each_netdev(net, dev) { 919 idev = __in6_dev_get(dev); 920 if (idev) { 921 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf); 922 923 WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, newf); 924 if (changed) 925 inet6_netconf_notify_devconf(dev_net(dev), 926 RTM_NEWNETCONF, 927 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, 928 dev->ifindex, 929 &idev->cnf); 930 } 931 } 932 } 933 934 static int addrconf_fixup_linkdown(const struct ctl_table *table, int *p, int newf) 935 { 936 struct net *net; 937 int old; 938 939 if (!rtnl_trylock()) 940 return restart_syscall(); 941 942 net = (struct net *)table->extra2; 943 old = *p; 944 WRITE_ONCE(*p, newf); 945 946 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) { 947 if ((!newf) ^ (!old)) 948 
inet6_netconf_notify_devconf(net, 949 RTM_NEWNETCONF, 950 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, 951 NETCONFA_IFINDEX_DEFAULT, 952 net->ipv6.devconf_dflt); 953 rtnl_unlock(); 954 return 0; 955 } 956 957 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) { 958 WRITE_ONCE(net->ipv6.devconf_dflt->ignore_routes_with_linkdown, newf); 959 addrconf_linkdown_change(net, newf); 960 if ((!newf) ^ (!old)) 961 inet6_netconf_notify_devconf(net, 962 RTM_NEWNETCONF, 963 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, 964 NETCONFA_IFINDEX_ALL, 965 net->ipv6.devconf_all); 966 } 967 rtnl_unlock(); 968 969 return 1; 970 } 971 972 #endif 973 974 /* Nobody refers to this ifaddr, destroy it */ 975 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) 976 { 977 WARN_ON(!hlist_unhashed(&ifp->addr_lst)); 978 979 #ifdef NET_REFCNT_DEBUG 980 pr_debug("%s\n", __func__); 981 #endif 982 983 in6_dev_put(ifp->idev); 984 985 if (cancel_delayed_work(&ifp->dad_work)) 986 pr_notice("delayed DAD work was pending while freeing ifa=%p\n", 987 ifp); 988 989 if (ifp->state != INET6_IFADDR_STATE_DEAD) { 990 pr_warn("Freeing alive inet6 address %p\n", ifp); 991 return; 992 } 993 994 kfree_rcu(ifp, rcu); 995 } 996 997 static void 998 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) 999 { 1000 struct list_head *p; 1001 int ifp_scope = ipv6_addr_src_scope(&ifp->addr); 1002 1003 /* 1004 * Each device address list is sorted in order of scope - 1005 * global before linklocal. 1006 */ 1007 list_for_each(p, &idev->addr_list) { 1008 struct inet6_ifaddr *ifa 1009 = list_entry(p, struct inet6_ifaddr, if_list); 1010 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) 1011 break; 1012 } 1013 1014 list_add_tail_rcu(&ifp->if_list, p); 1015 } 1016 1017 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr) 1018 { 1019 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net); 1020 1021 return hash_32(val, IN6_ADDR_HSIZE_SHIFT); 1022 } 1023 1024 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, 1025 struct net_device *dev, unsigned int hash) 1026 { 1027 struct inet6_ifaddr *ifp; 1028 1029 hlist_for_each_entry(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) { 1030 if (ipv6_addr_equal(&ifp->addr, addr)) { 1031 if (!dev || ifp->idev->dev == dev) 1032 return true; 1033 } 1034 } 1035 return false; 1036 } 1037 1038 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa) 1039 { 1040 struct net *net = dev_net(dev); 1041 unsigned int hash = inet6_addr_hash(net, &ifa->addr); 1042 int err = 0; 1043 1044 spin_lock_bh(&net->ipv6.addrconf_hash_lock); 1045 1046 /* Ignore adding duplicate addresses on an interface */ 1047 if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) { 1048 netdev_dbg(dev, "ipv6_add_addr: already assigned\n"); 1049 err = -EEXIST; 1050 } else { 1051 hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]); 1052 } 1053 1054 spin_unlock_bh(&net->ipv6.addrconf_hash_lock); 1055 1056 return err; 1057 } 1058 1059 /* On success it returns ifp with increased reference count */ 1060 1061 static struct inet6_ifaddr * 1062 ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg, 1063 bool can_block, struct netlink_ext_ack *extack) 1064 { 1065 gfp_t gfp_flags = can_block ? 
GFP_KERNEL : GFP_ATOMIC; 1066 int addr_type = ipv6_addr_type(cfg->pfx); 1067 struct net *net = dev_net(idev->dev); 1068 struct inet6_ifaddr *ifa = NULL; 1069 struct fib6_info *f6i = NULL; 1070 int err = 0; 1071 1072 if (addr_type == IPV6_ADDR_ANY) { 1073 NL_SET_ERR_MSG_MOD(extack, "Invalid address"); 1074 return ERR_PTR(-EADDRNOTAVAIL); 1075 } else if (addr_type & IPV6_ADDR_MULTICAST && 1076 !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) { 1077 NL_SET_ERR_MSG_MOD(extack, "Cannot assign multicast address without \"IFA_F_MCAUTOJOIN\" flag"); 1078 return ERR_PTR(-EADDRNOTAVAIL); 1079 } else if (!(idev->dev->flags & IFF_LOOPBACK) && 1080 !netif_is_l3_master(idev->dev) && 1081 addr_type & IPV6_ADDR_LOOPBACK) { 1082 NL_SET_ERR_MSG_MOD(extack, "Cannot assign loopback address on this device"); 1083 return ERR_PTR(-EADDRNOTAVAIL); 1084 } 1085 1086 if (idev->dead) { 1087 NL_SET_ERR_MSG_MOD(extack, "device is going away"); 1088 err = -ENODEV; 1089 goto out; 1090 } 1091 1092 if (idev->cnf.disable_ipv6) { 1093 NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device"); 1094 err = -EACCES; 1095 goto out; 1096 } 1097 1098 /* validator notifier needs to be blocking; 1099 * do not call in atomic context 1100 */ 1101 if (can_block) { 1102 struct in6_validator_info i6vi = { 1103 .i6vi_addr = *cfg->pfx, 1104 .i6vi_dev = idev, 1105 .extack = extack, 1106 }; 1107 1108 err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi); 1109 err = notifier_to_errno(err); 1110 if (err < 0) 1111 goto out; 1112 } 1113 1114 ifa = kzalloc(sizeof(*ifa), gfp_flags | __GFP_ACCOUNT); 1115 if (!ifa) { 1116 err = -ENOBUFS; 1117 goto out; 1118 } 1119 1120 f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags, extack); 1121 if (IS_ERR(f6i)) { 1122 err = PTR_ERR(f6i); 1123 f6i = NULL; 1124 goto out; 1125 } 1126 1127 neigh_parms_data_state_setall(idev->nd_parms); 1128 1129 ifa->addr = *cfg->pfx; 1130 if (cfg->peer_pfx) 1131 ifa->peer_addr = *cfg->peer_pfx; 1132 1133 spin_lock_init(&ifa->lock); 1134 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work); 1135 INIT_HLIST_NODE(&ifa->addr_lst); 1136 ifa->scope = cfg->scope; 1137 ifa->prefix_len = cfg->plen; 1138 ifa->rt_priority = cfg->rt_priority; 1139 ifa->flags = cfg->ifa_flags; 1140 ifa->ifa_proto = cfg->ifa_proto; 1141 /* No need to add the TENTATIVE flag for addresses with NODAD */ 1142 if (!(cfg->ifa_flags & IFA_F_NODAD)) 1143 ifa->flags |= IFA_F_TENTATIVE; 1144 ifa->valid_lft = cfg->valid_lft; 1145 ifa->prefered_lft = cfg->preferred_lft; 1146 ifa->cstamp = ifa->tstamp = jiffies; 1147 ifa->tokenized = false; 1148 1149 ifa->rt = f6i; 1150 1151 ifa->idev = idev; 1152 in6_dev_hold(idev); 1153 1154 /* For caller */ 1155 refcount_set(&ifa->refcnt, 1); 1156 1157 rcu_read_lock(); 1158 1159 err = ipv6_add_addr_hash(idev->dev, ifa); 1160 if (err < 0) { 1161 rcu_read_unlock(); 1162 goto out; 1163 } 1164 1165 write_lock_bh(&idev->lock); 1166 1167 /* Add to inet6_dev unicast addr list. 
*/ 1168 ipv6_link_dev_addr(idev, ifa); 1169 1170 if (ifa->flags&IFA_F_TEMPORARY) { 1171 list_add(&ifa->tmp_list, &idev->tempaddr_list); 1172 in6_ifa_hold(ifa); 1173 } 1174 1175 in6_ifa_hold(ifa); 1176 write_unlock_bh(&idev->lock); 1177 1178 rcu_read_unlock(); 1179 1180 inet6addr_notifier_call_chain(NETDEV_UP, ifa); 1181 out: 1182 if (unlikely(err < 0)) { 1183 fib6_info_release(f6i); 1184 1185 if (ifa) { 1186 if (ifa->idev) 1187 in6_dev_put(ifa->idev); 1188 kfree(ifa); 1189 } 1190 ifa = ERR_PTR(err); 1191 } 1192 1193 return ifa; 1194 } 1195 1196 enum cleanup_prefix_rt_t { 1197 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */ 1198 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */ 1199 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */ 1200 }; 1201 1202 /* 1203 * Check, whether the prefix for ifp would still need a prefix route 1204 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_* 1205 * constants. 1206 * 1207 * 1) we don't purge prefix if address was not permanent. 1208 * prefix is managed by its own lifetime. 1209 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE. 1210 * 3) if there are no addresses, delete prefix. 1211 * 4) if there are still other permanent address(es), 1212 * corresponding prefix is still permanent. 1213 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE, 1214 * don't purge the prefix, assume user space is managing it. 1215 * 6) otherwise, update prefix lifetime to the 1216 * longest valid lifetime among the corresponding 1217 * addresses on the device. 1218 * Note: subsequent RA will update lifetime. 1219 **/ 1220 static enum cleanup_prefix_rt_t 1221 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) 1222 { 1223 struct inet6_ifaddr *ifa; 1224 struct inet6_dev *idev = ifp->idev; 1225 unsigned long lifetime; 1226 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL; 1227 1228 *expires = jiffies; 1229 1230 list_for_each_entry(ifa, &idev->addr_list, if_list) { 1231 if (ifa == ifp) 1232 continue; 1233 if (ifa->prefix_len != ifp->prefix_len || 1234 !ipv6_prefix_equal(&ifa->addr, &ifp->addr, 1235 ifp->prefix_len)) 1236 continue; 1237 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) 1238 return CLEANUP_PREFIX_RT_NOP; 1239 1240 action = CLEANUP_PREFIX_RT_EXPIRE; 1241 1242 spin_lock(&ifa->lock); 1243 1244 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ); 1245 /* 1246 * Note: Because this address is 1247 * not permanent, lifetime < 1248 * LONG_MAX / HZ here. 1249 */ 1250 if (time_before(*expires, ifa->tstamp + lifetime * HZ)) 1251 *expires = ifa->tstamp + lifetime * HZ; 1252 spin_unlock(&ifa->lock); 1253 } 1254 1255 return action; 1256 } 1257 1258 static void 1259 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, 1260 bool del_rt, bool del_peer) 1261 { 1262 struct fib6_table *table; 1263 struct fib6_info *f6i; 1264 1265 f6i = addrconf_get_prefix_route(del_peer ? 
&ifp->peer_addr : &ifp->addr, 1266 ifp->prefix_len, 1267 ifp->idev->dev, 0, RTF_DEFAULT, true); 1268 if (f6i) { 1269 if (del_rt) 1270 ip6_del_rt(dev_net(ifp->idev->dev), f6i, false); 1271 else { 1272 if (!(f6i->fib6_flags & RTF_EXPIRES)) { 1273 table = f6i->fib6_table; 1274 spin_lock_bh(&table->tb6_lock); 1275 1276 fib6_set_expires(f6i, expires); 1277 fib6_add_gc_list(f6i); 1278 1279 spin_unlock_bh(&table->tb6_lock); 1280 } 1281 fib6_info_release(f6i); 1282 } 1283 } 1284 } 1285 1286 1287 /* This function wants to get referenced ifp and releases it before return */ 1288 1289 static void ipv6_del_addr(struct inet6_ifaddr *ifp) 1290 { 1291 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP; 1292 struct net *net = dev_net(ifp->idev->dev); 1293 unsigned long expires; 1294 int state; 1295 1296 ASSERT_RTNL(); 1297 1298 spin_lock_bh(&ifp->lock); 1299 state = ifp->state; 1300 ifp->state = INET6_IFADDR_STATE_DEAD; 1301 spin_unlock_bh(&ifp->lock); 1302 1303 if (state == INET6_IFADDR_STATE_DEAD) 1304 goto out; 1305 1306 spin_lock_bh(&net->ipv6.addrconf_hash_lock); 1307 hlist_del_init_rcu(&ifp->addr_lst); 1308 spin_unlock_bh(&net->ipv6.addrconf_hash_lock); 1309 1310 write_lock_bh(&ifp->idev->lock); 1311 1312 if (ifp->flags&IFA_F_TEMPORARY) { 1313 list_del(&ifp->tmp_list); 1314 if (ifp->ifpub) { 1315 in6_ifa_put(ifp->ifpub); 1316 ifp->ifpub = NULL; 1317 } 1318 __in6_ifa_put(ifp); 1319 } 1320 1321 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE)) 1322 action = check_cleanup_prefix_route(ifp, &expires); 1323 1324 list_del_rcu(&ifp->if_list); 1325 __in6_ifa_put(ifp); 1326 1327 write_unlock_bh(&ifp->idev->lock); 1328 1329 addrconf_del_dad_work(ifp); 1330 1331 ipv6_ifa_notify(RTM_DELADDR, ifp); 1332 1333 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp); 1334 1335 if (action != CLEANUP_PREFIX_RT_NOP) { 1336 cleanup_prefix_route(ifp, expires, 1337 action == CLEANUP_PREFIX_RT_DEL, false); 1338 } 1339 1340 /* clean up prefsrc entries */ 1341 rt6_remove_prefsrc(ifp); 1342 out: 1343 in6_ifa_put(ifp); 1344 } 1345 1346 static unsigned long ipv6_get_regen_advance(const struct inet6_dev *idev) 1347 { 1348 return READ_ONCE(idev->cnf.regen_min_advance) + 1349 READ_ONCE(idev->cnf.regen_max_retry) * 1350 READ_ONCE(idev->cnf.dad_transmits) * 1351 max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ; 1352 } 1353 1354 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block) 1355 { 1356 struct inet6_dev *idev = ifp->idev; 1357 unsigned long tmp_tstamp, age; 1358 unsigned long regen_advance; 1359 unsigned long now = jiffies; 1360 u32 if_public_preferred_lft; 1361 s32 cnf_temp_preferred_lft; 1362 struct inet6_ifaddr *ift; 1363 struct ifa6_config cfg; 1364 long max_desync_factor; 1365 struct in6_addr addr; 1366 int ret = 0; 1367 1368 write_lock_bh(&idev->lock); 1369 1370 retry: 1371 in6_dev_hold(idev); 1372 if (READ_ONCE(idev->cnf.use_tempaddr) <= 0) { 1373 write_unlock_bh(&idev->lock); 1374 pr_info("%s: use_tempaddr is disabled\n", __func__); 1375 in6_dev_put(idev); 1376 ret = -1; 1377 goto out; 1378 } 1379 spin_lock_bh(&ifp->lock); 1380 if (ifp->regen_count++ >= READ_ONCE(idev->cnf.regen_max_retry)) { 1381 WRITE_ONCE(idev->cnf.use_tempaddr, -1); /*XXX*/ 1382 spin_unlock_bh(&ifp->lock); 1383 write_unlock_bh(&idev->lock); 1384 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n", 1385 __func__); 1386 in6_dev_put(idev); 1387 ret = -1; 1388 goto out; 1389 } 1390 in6_ifa_hold(ifp); 1391 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8); 1392 ipv6_gen_rnd_iid(&addr); 
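/* Editor's sketch (illustrative only, not part of addrconf.c): a temporary
 * address keeps the public address's /64 prefix and only the interface
 * identifier (the low 64 bits) is replaced with random bits, which is what
 * the memcpy() + ipv6_gen_rnd_iid() pair above does; the real generator
 * additionally re-rolls identifiers that fall into reserved IID ranges.
 */
static void example_build_temp_addr(struct in6_addr *tmp,
				    const struct in6_addr *pub)
{
	memcpy(tmp->s6_addr, pub->s6_addr, 8);	/* keep the /64 prefix */
	get_random_bytes(&tmp->s6_addr[8], 8);	/* randomize the IID */
}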
1393 1394 age = (now - ifp->tstamp) / HZ; 1395 1396 regen_advance = ipv6_get_regen_advance(idev); 1397 1398 /* recalculate max_desync_factor each time and update 1399 * idev->desync_factor if it's larger 1400 */ 1401 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft); 1402 max_desync_factor = min_t(long, 1403 READ_ONCE(idev->cnf.max_desync_factor), 1404 cnf_temp_preferred_lft - regen_advance); 1405 1406 if (unlikely(idev->desync_factor > max_desync_factor)) { 1407 if (max_desync_factor > 0) { 1408 get_random_bytes(&idev->desync_factor, 1409 sizeof(idev->desync_factor)); 1410 idev->desync_factor %= max_desync_factor; 1411 } else { 1412 idev->desync_factor = 0; 1413 } 1414 } 1415 1416 if_public_preferred_lft = ifp->prefered_lft; 1417 1418 memset(&cfg, 0, sizeof(cfg)); 1419 cfg.valid_lft = min_t(__u32, ifp->valid_lft, 1420 READ_ONCE(idev->cnf.temp_valid_lft) + age); 1421 cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor; 1422 cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft); 1423 cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft); 1424 1425 cfg.plen = ifp->prefix_len; 1426 tmp_tstamp = ifp->tstamp; 1427 spin_unlock_bh(&ifp->lock); 1428 1429 write_unlock_bh(&idev->lock); 1430 1431 /* From RFC 4941: 1432 * 1433 * A temporary address is created only if this calculated Preferred 1434 * Lifetime is greater than REGEN_ADVANCE time units. In 1435 * particular, an implementation must not create a temporary address 1436 * with a zero Preferred Lifetime. 1437 * 1438 * ... 1439 * 1440 * When creating a temporary address, the lifetime values MUST be 1441 * derived from the corresponding prefix as follows: 1442 * 1443 * ... 1444 * 1445 * * Its Preferred Lifetime is the lower of the Preferred Lifetime 1446 * of the public address or TEMP_PREFERRED_LIFETIME - 1447 * DESYNC_FACTOR. 1448 * 1449 * To comply with the RFC's requirements, clamp the preferred lifetime 1450 * to a minimum of regen_advance, unless that would exceed valid_lft or 1451 * ifp->prefered_lft. 1452 * 1453 * Use age calculation as in addrconf_verify to avoid unnecessary 1454 * temporary addresses being generated. 
1455 */ 1456 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; 1457 if (cfg.preferred_lft <= regen_advance + age) { 1458 cfg.preferred_lft = regen_advance + age + 1; 1459 if (cfg.preferred_lft > cfg.valid_lft || 1460 cfg.preferred_lft > if_public_preferred_lft) { 1461 in6_ifa_put(ifp); 1462 in6_dev_put(idev); 1463 ret = -1; 1464 goto out; 1465 } 1466 } 1467 1468 cfg.ifa_flags = IFA_F_TEMPORARY; 1469 /* set in addrconf_prefix_rcv() */ 1470 if (ifp->flags & IFA_F_OPTIMISTIC) 1471 cfg.ifa_flags |= IFA_F_OPTIMISTIC; 1472 1473 cfg.pfx = &addr; 1474 cfg.scope = ipv6_addr_scope(cfg.pfx); 1475 1476 ift = ipv6_add_addr(idev, &cfg, block, NULL); 1477 if (IS_ERR(ift)) { 1478 in6_ifa_put(ifp); 1479 in6_dev_put(idev); 1480 pr_info("%s: retry temporary address regeneration\n", __func__); 1481 write_lock_bh(&idev->lock); 1482 goto retry; 1483 } 1484 1485 spin_lock_bh(&ift->lock); 1486 ift->ifpub = ifp; 1487 ift->cstamp = now; 1488 ift->tstamp = tmp_tstamp; 1489 spin_unlock_bh(&ift->lock); 1490 1491 addrconf_dad_start(ift); 1492 in6_ifa_put(ift); 1493 in6_dev_put(idev); 1494 out: 1495 return ret; 1496 } 1497 1498 /* 1499 * Choose an appropriate source address (RFC3484) 1500 */ 1501 enum { 1502 IPV6_SADDR_RULE_INIT = 0, 1503 IPV6_SADDR_RULE_LOCAL, 1504 IPV6_SADDR_RULE_SCOPE, 1505 IPV6_SADDR_RULE_PREFERRED, 1506 #ifdef CONFIG_IPV6_MIP6 1507 IPV6_SADDR_RULE_HOA, 1508 #endif 1509 IPV6_SADDR_RULE_OIF, 1510 IPV6_SADDR_RULE_LABEL, 1511 IPV6_SADDR_RULE_PRIVACY, 1512 IPV6_SADDR_RULE_ORCHID, 1513 IPV6_SADDR_RULE_PREFIX, 1514 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 1515 IPV6_SADDR_RULE_NOT_OPTIMISTIC, 1516 #endif 1517 IPV6_SADDR_RULE_MAX 1518 }; 1519 1520 struct ipv6_saddr_score { 1521 int rule; 1522 int addr_type; 1523 struct inet6_ifaddr *ifa; 1524 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX); 1525 int scopedist; 1526 int matchlen; 1527 }; 1528 1529 struct ipv6_saddr_dst { 1530 const struct in6_addr *addr; 1531 int ifindex; 1532 int scope; 1533 int label; 1534 unsigned int prefs; 1535 }; 1536 1537 static inline int ipv6_saddr_preferred(int type) 1538 { 1539 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK)) 1540 return 1; 1541 return 0; 1542 } 1543 1544 static bool ipv6_use_optimistic_addr(const struct net *net, 1545 const struct inet6_dev *idev) 1546 { 1547 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 1548 if (!idev) 1549 return false; 1550 if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) && 1551 !READ_ONCE(idev->cnf.optimistic_dad)) 1552 return false; 1553 if (!READ_ONCE(net->ipv6.devconf_all->use_optimistic) && 1554 !READ_ONCE(idev->cnf.use_optimistic)) 1555 return false; 1556 1557 return true; 1558 #else 1559 return false; 1560 #endif 1561 } 1562 1563 static bool ipv6_allow_optimistic_dad(const struct net *net, 1564 const struct inet6_dev *idev) 1565 { 1566 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 1567 if (!idev) 1568 return false; 1569 if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) && 1570 !READ_ONCE(idev->cnf.optimistic_dad)) 1571 return false; 1572 1573 return true; 1574 #else 1575 return false; 1576 #endif 1577 } 1578 1579 static int ipv6_get_saddr_eval(struct net *net, 1580 struct ipv6_saddr_score *score, 1581 struct ipv6_saddr_dst *dst, 1582 int i) 1583 { 1584 int ret; 1585 1586 if (i <= score->rule) { 1587 switch (i) { 1588 case IPV6_SADDR_RULE_SCOPE: 1589 ret = score->scopedist; 1590 break; 1591 case IPV6_SADDR_RULE_PREFIX: 1592 ret = score->matchlen; 1593 break; 1594 default: 1595 ret = !!test_bit(i, score->scorebits); 1596 } 1597 goto out; 1598 } 1599 1600 switch (i) { 1601 case 
IPV6_SADDR_RULE_INIT:
		/* Rule 0: remember if hiscore is not ready yet */
		ret = !!score->ifa;
		break;
	case IPV6_SADDR_RULE_LOCAL:
		/* Rule 1: Prefer same address */
		ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
		break;
	case IPV6_SADDR_RULE_SCOPE:
		/* Rule 2: Prefer appropriate scope
		 *
		 *      ret
		 *       ^
		 *    -1 |  d 15
		 *    ---+--+-+---> scope
		 *       |
		 *       |             d is scope of the destination.
		 *  B-d  |  \
		 *       |   \      <- smaller scope is better if
		 *  B-15 |    \        if scope is enough for destination.
		 *       |             ret = B - scope (-1 <= scope >= d <= 15).
		 * d-C-1 | /
		 *       |/         <- greater is better
		 *   -C  /             if scope is not enough for destination.
		 *      /|             ret = scope - C (-1 <= d < scope <= 15).
		 *
		 *  d - C - 1 < B -15 (for all -1 <= d <= 15).
		 *  C > d + 14 - B >= 15 + 14 - B = 29 - B.
		 *  Assume B = 0 and we get C > 29.
		 */
		ret = __ipv6_addr_src_scope(score->addr_type);
		if (ret >= dst->scope)
			ret = -ret;
		else
			ret -= 128;	/* 30 is enough */
		score->scopedist = ret;
		break;
	case IPV6_SADDR_RULE_PREFERRED:
	    {
		/* Rule 3: Avoid deprecated and optimistic addresses */
		u8 avoid = IFA_F_DEPRECATED;

		if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
			avoid |= IFA_F_OPTIMISTIC;
		ret = ipv6_saddr_preferred(score->addr_type) ||
		      !(score->ifa->flags & avoid);
		break;
	    }
#ifdef CONFIG_IPV6_MIP6
	case IPV6_SADDR_RULE_HOA:
	    {
		/* Rule 4: Prefer home address */
		int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
		ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
		break;
	    }
#endif
	case IPV6_SADDR_RULE_OIF:
		/* Rule 5: Prefer outgoing interface */
		ret = (!dst->ifindex ||
		       dst->ifindex == score->ifa->idev->dev->ifindex);
		break;
	case IPV6_SADDR_RULE_LABEL:
		/* Rule 6: Prefer matching label */
		ret = ipv6_addr_label(net,
				      &score->ifa->addr, score->addr_type,
				      score->ifa->idev->dev->ifindex) == dst->label;
		break;
	case IPV6_SADDR_RULE_PRIVACY:
	    {
		/* Rule 7: Prefer public address
		 * Note: prefer temporary address if use_tempaddr >= 2
		 */
		int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
				!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
				READ_ONCE(score->ifa->idev->cnf.use_tempaddr) >= 2;
		ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
		break;
	    }
	case IPV6_SADDR_RULE_ORCHID:
		/* Rule 8-: Prefer ORCHID vs ORCHID or
		 *	    non-ORCHID vs non-ORCHID
		 */
		ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
			ipv6_addr_orchid(dst->addr));
		break;
	case IPV6_SADDR_RULE_PREFIX:
		/* Rule 8: Use longest matching prefix */
		ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
		if (ret > score->ifa->prefix_len)
			ret = score->ifa->prefix_len;
		score->matchlen = ret;
		break;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
		/* Optimistic addresses still have lower precedence than other
		 * preferred addresses.
1698 */ 1699 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC); 1700 break; 1701 #endif 1702 default: 1703 ret = 0; 1704 } 1705 1706 if (ret) 1707 __set_bit(i, score->scorebits); 1708 score->rule = i; 1709 out: 1710 return ret; 1711 } 1712 1713 static int __ipv6_dev_get_saddr(struct net *net, 1714 struct ipv6_saddr_dst *dst, 1715 struct inet6_dev *idev, 1716 struct ipv6_saddr_score *scores, 1717 int hiscore_idx) 1718 { 1719 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx]; 1720 1721 list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) { 1722 int i; 1723 1724 /* 1725 * - Tentative Address (RFC2462 section 5.4) 1726 * - A tentative address is not considered 1727 * "assigned to an interface" in the traditional 1728 * sense, unless it is also flagged as optimistic. 1729 * - Candidate Source Address (section 4) 1730 * - In any case, anycast addresses, multicast 1731 * addresses, and the unspecified address MUST 1732 * NOT be included in a candidate set. 1733 */ 1734 if ((score->ifa->flags & IFA_F_TENTATIVE) && 1735 (!(score->ifa->flags & IFA_F_OPTIMISTIC))) 1736 continue; 1737 1738 score->addr_type = __ipv6_addr_type(&score->ifa->addr); 1739 1740 if (unlikely(score->addr_type == IPV6_ADDR_ANY || 1741 score->addr_type & IPV6_ADDR_MULTICAST)) { 1742 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s", 1743 idev->dev->name); 1744 continue; 1745 } 1746 1747 score->rule = -1; 1748 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX); 1749 1750 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) { 1751 int minihiscore, miniscore; 1752 1753 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i); 1754 miniscore = ipv6_get_saddr_eval(net, score, dst, i); 1755 1756 if (minihiscore > miniscore) { 1757 if (i == IPV6_SADDR_RULE_SCOPE && 1758 score->scopedist > 0) { 1759 /* 1760 * special case: 1761 * each remaining entry 1762 * has too small (not enough) 1763 * scope, because ifa entries 1764 * are sorted by their scope 1765 * values. 1766 */ 1767 goto out; 1768 } 1769 break; 1770 } else if (minihiscore < miniscore) { 1771 swap(hiscore, score); 1772 hiscore_idx = 1 - hiscore_idx; 1773 1774 /* restore our iterator */ 1775 score->ifa = hiscore->ifa; 1776 1777 break; 1778 } 1779 } 1780 } 1781 out: 1782 return hiscore_idx; 1783 } 1784 1785 static int ipv6_get_saddr_master(struct net *net, 1786 const struct net_device *dst_dev, 1787 const struct net_device *master, 1788 struct ipv6_saddr_dst *dst, 1789 struct ipv6_saddr_score *scores, 1790 int hiscore_idx) 1791 { 1792 struct inet6_dev *idev; 1793 1794 idev = __in6_dev_get(dst_dev); 1795 if (idev) 1796 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev, 1797 scores, hiscore_idx); 1798 1799 idev = __in6_dev_get(master); 1800 if (idev) 1801 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev, 1802 scores, hiscore_idx); 1803 1804 return hiscore_idx; 1805 } 1806 1807 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev, 1808 const struct in6_addr *daddr, unsigned int prefs, 1809 struct in6_addr *saddr) 1810 { 1811 struct ipv6_saddr_score scores[2], *hiscore; 1812 struct ipv6_saddr_dst dst; 1813 struct inet6_dev *idev; 1814 struct net_device *dev; 1815 int dst_type; 1816 bool use_oif_addr = false; 1817 int hiscore_idx = 0; 1818 int ret = 0; 1819 1820 dst_type = __ipv6_addr_type(daddr); 1821 dst.addr = daddr; 1822 dst.ifindex = dst_dev ? 
dst_dev->ifindex : 0; 1823 dst.scope = __ipv6_addr_src_scope(dst_type); 1824 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex); 1825 dst.prefs = prefs; 1826 1827 scores[hiscore_idx].rule = -1; 1828 scores[hiscore_idx].ifa = NULL; 1829 1830 rcu_read_lock(); 1831 1832 /* Candidate Source Address (section 4) 1833 * - multicast and link-local destination address, 1834 * the set of candidate source address MUST only 1835 * include addresses assigned to interfaces 1836 * belonging to the same link as the outgoing 1837 * interface. 1838 * (- For site-local destination addresses, the 1839 * set of candidate source addresses MUST only 1840 * include addresses assigned to interfaces 1841 * belonging to the same site as the outgoing 1842 * interface.) 1843 * - "It is RECOMMENDED that the candidate source addresses 1844 * be the set of unicast addresses assigned to the 1845 * interface that will be used to send to the destination 1846 * (the 'outgoing' interface)." (RFC 6724) 1847 */ 1848 if (dst_dev) { 1849 idev = __in6_dev_get(dst_dev); 1850 if ((dst_type & IPV6_ADDR_MULTICAST) || 1851 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL || 1852 (idev && READ_ONCE(idev->cnf.use_oif_addrs_only))) { 1853 use_oif_addr = true; 1854 } 1855 } 1856 1857 if (use_oif_addr) { 1858 if (idev) 1859 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx); 1860 } else { 1861 const struct net_device *master; 1862 int master_idx = 0; 1863 1864 /* if dst_dev exists and is enslaved to an L3 device, then 1865 * prefer addresses from dst_dev and then the master over 1866 * any other enslaved devices in the L3 domain. 1867 */ 1868 master = l3mdev_master_dev_rcu(dst_dev); 1869 if (master) { 1870 master_idx = master->ifindex; 1871 1872 hiscore_idx = ipv6_get_saddr_master(net, dst_dev, 1873 master, &dst, 1874 scores, hiscore_idx); 1875 1876 if (scores[hiscore_idx].ifa && 1877 scores[hiscore_idx].scopedist >= 0) 1878 goto out; 1879 } 1880 1881 for_each_netdev_rcu(net, dev) { 1882 /* only consider addresses on devices in the 1883 * same L3 domain 1884 */ 1885 if (l3mdev_master_ifindex_rcu(dev) != master_idx) 1886 continue; 1887 idev = __in6_dev_get(dev); 1888 if (!idev) 1889 continue; 1890 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx); 1891 } 1892 } 1893 1894 out: 1895 hiscore = &scores[hiscore_idx]; 1896 if (!hiscore->ifa) 1897 ret = -EADDRNOTAVAIL; 1898 else 1899 *saddr = hiscore->ifa->addr; 1900 1901 rcu_read_unlock(); 1902 return ret; 1903 } 1904 EXPORT_SYMBOL(ipv6_dev_get_saddr); 1905 1906 static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, 1907 u32 banned_flags) 1908 { 1909 struct inet6_ifaddr *ifp; 1910 int err = -EADDRNOTAVAIL; 1911 1912 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) { 1913 if (ifp->scope > IFA_LINK) 1914 break; 1915 if (ifp->scope == IFA_LINK && 1916 !(ifp->flags & banned_flags)) { 1917 *addr = ifp->addr; 1918 err = 0; 1919 break; 1920 } 1921 } 1922 return err; 1923 } 1924 1925 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, 1926 u32 banned_flags) 1927 { 1928 struct inet6_dev *idev; 1929 int err = -EADDRNOTAVAIL; 1930 1931 rcu_read_lock(); 1932 idev = __in6_dev_get(dev); 1933 if (idev) { 1934 read_lock_bh(&idev->lock); 1935 err = __ipv6_get_lladdr(idev, addr, banned_flags); 1936 read_unlock_bh(&idev->lock); 1937 } 1938 rcu_read_unlock(); 1939 return err; 1940 } 1941 1942 static int ipv6_count_addresses(const struct inet6_dev *idev) 1943 { 1944 const struct inet6_ifaddr *ifp; 1945 int cnt = 0; 1946 1947 
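/* Editor's sketch (illustrative only, not part of addrconf.c): the heart of
 * the RFC 6724 selection done by __ipv6_dev_get_saddr() above.  A candidate
 * and the current best address are scored rule by rule via
 * ipv6_get_saddr_eval(); the first rule where the scores differ decides the
 * winner, so later rules only break ties.  The hypothetical helper below
 * omits the early exit taken when every remaining candidate has insufficient
 * scope.
 */
static bool example_candidate_wins(struct net *net,
				   struct ipv6_saddr_score *cand,
				   struct ipv6_saddr_score *best,
				   struct ipv6_saddr_dst *dst)
{
	int i;

	for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
		int best_val = ipv6_get_saddr_eval(net, best, dst, i);
		int cand_val = ipv6_get_saddr_eval(net, cand, dst, i);

		if (cand_val != best_val)
			return cand_val > best_val;	/* first difference decides */
	}
	return false;	/* complete tie: keep the current best */
}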
rcu_read_lock(); 1948 list_for_each_entry_rcu(ifp, &idev->addr_list, if_list) 1949 cnt++; 1950 rcu_read_unlock(); 1951 return cnt; 1952 } 1953 1954 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr, 1955 const struct net_device *dev, int strict) 1956 { 1957 return ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1958 strict, IFA_F_TENTATIVE); 1959 } 1960 EXPORT_SYMBOL(ipv6_chk_addr); 1961 1962 /* device argument is used to find the L3 domain of interest. If 1963 * skip_dev_check is set, then the ifp device is not checked against 1964 * the passed in dev argument. So the 2 cases for addresses checks are: 1965 * 1. does the address exist in the L3 domain that dev is part of 1966 * (skip_dev_check = true), or 1967 * 1968 * 2. does the address exist on the specific device 1969 * (skip_dev_check = false) 1970 */ 1971 static struct net_device * 1972 __ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, 1973 const struct net_device *dev, bool skip_dev_check, 1974 int strict, u32 banned_flags) 1975 { 1976 unsigned int hash = inet6_addr_hash(net, addr); 1977 struct net_device *l3mdev, *ndev; 1978 struct inet6_ifaddr *ifp; 1979 u32 ifp_flags; 1980 1981 rcu_read_lock(); 1982 1983 l3mdev = l3mdev_master_dev_rcu(dev); 1984 if (skip_dev_check) 1985 dev = NULL; 1986 1987 hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) { 1988 ndev = ifp->idev->dev; 1989 1990 if (l3mdev_master_dev_rcu(ndev) != l3mdev) 1991 continue; 1992 1993 /* Decouple optimistic from tentative for evaluation here. 1994 * Ban optimistic addresses explicitly, when required. 1995 */ 1996 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC) 1997 ? (ifp->flags&~IFA_F_TENTATIVE) 1998 : ifp->flags; 1999 if (ipv6_addr_equal(&ifp->addr, addr) && 2000 !(ifp_flags&banned_flags) && 2001 (!dev || ndev == dev || 2002 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) { 2003 rcu_read_unlock(); 2004 return ndev; 2005 } 2006 } 2007 2008 rcu_read_unlock(); 2009 return NULL; 2010 } 2011 2012 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, 2013 const struct net_device *dev, bool skip_dev_check, 2014 int strict, u32 banned_flags) 2015 { 2016 return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check, 2017 strict, banned_flags) ? 1 : 0; 2018 } 2019 EXPORT_SYMBOL(ipv6_chk_addr_and_flags); 2020 2021 2022 /* Compares an address/prefix_len with addresses on device @dev. 2023 * If one is found it returns true. 
2024 */ 2025 bool ipv6_chk_custom_prefix(const struct in6_addr *addr, 2026 const unsigned int prefix_len, struct net_device *dev) 2027 { 2028 const struct inet6_ifaddr *ifa; 2029 const struct inet6_dev *idev; 2030 bool ret = false; 2031 2032 rcu_read_lock(); 2033 idev = __in6_dev_get(dev); 2034 if (idev) { 2035 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) { 2036 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len); 2037 if (ret) 2038 break; 2039 } 2040 } 2041 rcu_read_unlock(); 2042 2043 return ret; 2044 } 2045 EXPORT_SYMBOL(ipv6_chk_custom_prefix); 2046 2047 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) 2048 { 2049 const struct inet6_ifaddr *ifa; 2050 const struct inet6_dev *idev; 2051 int onlink; 2052 2053 onlink = 0; 2054 rcu_read_lock(); 2055 idev = __in6_dev_get(dev); 2056 if (idev) { 2057 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) { 2058 onlink = ipv6_prefix_equal(addr, &ifa->addr, 2059 ifa->prefix_len); 2060 if (onlink) 2061 break; 2062 } 2063 } 2064 rcu_read_unlock(); 2065 return onlink; 2066 } 2067 EXPORT_SYMBOL(ipv6_chk_prefix); 2068 2069 /** 2070 * ipv6_dev_find - find the first device with a given source address. 2071 * @net: the net namespace 2072 * @addr: the source address 2073 * @dev: used to find the L3 domain of interest 2074 * 2075 * The caller should be protected by RCU, or RTNL. 2076 */ 2077 struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr, 2078 struct net_device *dev) 2079 { 2080 return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1, 2081 IFA_F_TENTATIVE); 2082 } 2083 EXPORT_SYMBOL(ipv6_dev_find); 2084 2085 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, 2086 struct net_device *dev, int strict) 2087 { 2088 unsigned int hash = inet6_addr_hash(net, addr); 2089 struct inet6_ifaddr *ifp, *result = NULL; 2090 2091 rcu_read_lock(); 2092 hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) { 2093 if (ipv6_addr_equal(&ifp->addr, addr)) { 2094 if (!dev || ifp->idev->dev == dev || 2095 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) { 2096 if (in6_ifa_hold_safe(ifp)) { 2097 result = ifp; 2098 break; 2099 } 2100 } 2101 } 2102 } 2103 rcu_read_unlock(); 2104 2105 return result; 2106 } 2107 2108 /* Gets referenced address, destroys ifaddr */ 2109 2110 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) 2111 { 2112 if (dad_failed) 2113 ifp->flags |= IFA_F_DADFAILED; 2114 2115 if (ifp->flags&IFA_F_TEMPORARY) { 2116 struct inet6_ifaddr *ifpub; 2117 spin_lock_bh(&ifp->lock); 2118 ifpub = ifp->ifpub; 2119 if (ifpub) { 2120 in6_ifa_hold(ifpub); 2121 spin_unlock_bh(&ifp->lock); 2122 ipv6_create_tempaddr(ifpub, true); 2123 in6_ifa_put(ifpub); 2124 } else { 2125 spin_unlock_bh(&ifp->lock); 2126 } 2127 ipv6_del_addr(ifp); 2128 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) { 2129 spin_lock_bh(&ifp->lock); 2130 addrconf_del_dad_work(ifp); 2131 ifp->flags |= IFA_F_TENTATIVE; 2132 if (dad_failed) 2133 ifp->flags &= ~IFA_F_OPTIMISTIC; 2134 spin_unlock_bh(&ifp->lock); 2135 if (dad_failed) 2136 ipv6_ifa_notify(0, ifp); 2137 in6_ifa_put(ifp); 2138 } else { 2139 ipv6_del_addr(ifp); 2140 } 2141 } 2142 2143 static int addrconf_dad_end(struct inet6_ifaddr *ifp) 2144 { 2145 int err = -ENOENT; 2146 2147 spin_lock_bh(&ifp->lock); 2148 if (ifp->state == INET6_IFADDR_STATE_DAD) { 2149 ifp->state = INET6_IFADDR_STATE_POSTDAD; 2150 err = 0; 2151 } 2152 spin_unlock_bh(&ifp->lock); 2153 2154 return err; 2155 } 2156 2157 void 
addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp) 2158 { 2159 struct inet6_dev *idev = ifp->idev; 2160 struct net *net = dev_net(idev->dev); 2161 int max_addresses; 2162 2163 if (addrconf_dad_end(ifp)) { 2164 in6_ifa_put(ifp); 2165 return; 2166 } 2167 2168 net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n", 2169 ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source); 2170 2171 spin_lock_bh(&ifp->lock); 2172 2173 if (ifp->flags & IFA_F_STABLE_PRIVACY) { 2174 struct in6_addr new_addr; 2175 struct inet6_ifaddr *ifp2; 2176 int retries = ifp->stable_privacy_retry + 1; 2177 struct ifa6_config cfg = { 2178 .pfx = &new_addr, 2179 .plen = ifp->prefix_len, 2180 .ifa_flags = ifp->flags, 2181 .valid_lft = ifp->valid_lft, 2182 .preferred_lft = ifp->prefered_lft, 2183 .scope = ifp->scope, 2184 }; 2185 2186 if (retries > net->ipv6.sysctl.idgen_retries) { 2187 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n", 2188 ifp->idev->dev->name); 2189 goto errdad; 2190 } 2191 2192 new_addr = ifp->addr; 2193 if (ipv6_generate_stable_address(&new_addr, retries, 2194 idev)) 2195 goto errdad; 2196 2197 spin_unlock_bh(&ifp->lock); 2198 2199 max_addresses = READ_ONCE(idev->cnf.max_addresses); 2200 if (max_addresses && 2201 ipv6_count_addresses(idev) >= max_addresses) 2202 goto lock_errdad; 2203 2204 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n", 2205 ifp->idev->dev->name); 2206 2207 ifp2 = ipv6_add_addr(idev, &cfg, false, NULL); 2208 if (IS_ERR(ifp2)) 2209 goto lock_errdad; 2210 2211 spin_lock_bh(&ifp2->lock); 2212 ifp2->stable_privacy_retry = retries; 2213 ifp2->state = INET6_IFADDR_STATE_PREDAD; 2214 spin_unlock_bh(&ifp2->lock); 2215 2216 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay); 2217 in6_ifa_put(ifp2); 2218 lock_errdad: 2219 spin_lock_bh(&ifp->lock); 2220 } 2221 2222 errdad: 2223 /* transition from _POSTDAD to _ERRDAD */ 2224 ifp->state = INET6_IFADDR_STATE_ERRDAD; 2225 spin_unlock_bh(&ifp->lock); 2226 2227 addrconf_mod_dad_work(ifp, 0); 2228 in6_ifa_put(ifp); 2229 } 2230 2231 /* Join to solicited addr multicast group. 
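 * The solicited-node group is derived from the low 24 bits of @addr by
 * addrconf_addr_solict_mult() (e.g. fe80::1:2:3 joins ff02::1:ff02:3);
 * loopback and IFF_NOARP devices are skipped since they do not perform
 * neighbour discovery.
 *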
2232 * caller must hold RTNL */ 2233 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) 2234 { 2235 struct in6_addr maddr; 2236 2237 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) 2238 return; 2239 2240 addrconf_addr_solict_mult(addr, &maddr); 2241 ipv6_dev_mc_inc(dev, &maddr); 2242 } 2243 2244 /* caller must hold RTNL */ 2245 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) 2246 { 2247 struct in6_addr maddr; 2248 2249 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP)) 2250 return; 2251 2252 addrconf_addr_solict_mult(addr, &maddr); 2253 __ipv6_dev_mc_dec(idev, &maddr); 2254 } 2255 2256 /* caller must hold RTNL */ 2257 static void addrconf_join_anycast(struct inet6_ifaddr *ifp) 2258 { 2259 struct in6_addr addr; 2260 2261 if (ifp->prefix_len >= 127) /* RFC 6164 */ 2262 return; 2263 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); 2264 if (ipv6_addr_any(&addr)) 2265 return; 2266 __ipv6_dev_ac_inc(ifp->idev, &addr); 2267 } 2268 2269 /* caller must hold RTNL */ 2270 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) 2271 { 2272 struct in6_addr addr; 2273 2274 if (ifp->prefix_len >= 127) /* RFC 6164 */ 2275 return; 2276 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); 2277 if (ipv6_addr_any(&addr)) 2278 return; 2279 __ipv6_dev_ac_dec(ifp->idev, &addr); 2280 } 2281 2282 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev) 2283 { 2284 switch (dev->addr_len) { 2285 case ETH_ALEN: 2286 memcpy(eui, dev->dev_addr, 3); 2287 eui[3] = 0xFF; 2288 eui[4] = 0xFE; 2289 memcpy(eui + 5, dev->dev_addr + 3, 3); 2290 break; 2291 case EUI64_ADDR_LEN: 2292 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN); 2293 eui[0] ^= 2; 2294 break; 2295 default: 2296 return -1; 2297 } 2298 2299 return 0; 2300 } 2301 2302 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev) 2303 { 2304 const union fwnet_hwaddr *ha; 2305 2306 if (dev->addr_len != FWNET_ALEN) 2307 return -1; 2308 2309 ha = (const union fwnet_hwaddr *)dev->dev_addr; 2310 2311 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id)); 2312 eui[0] ^= 2; 2313 return 0; 2314 } 2315 2316 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev) 2317 { 2318 /* XXX: inherit EUI-64 from other interface -- yoshfuji */ 2319 if (dev->addr_len != ARCNET_ALEN) 2320 return -1; 2321 memset(eui, 0, 7); 2322 eui[7] = *(u8 *)dev->dev_addr; 2323 return 0; 2324 } 2325 2326 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev) 2327 { 2328 if (dev->addr_len != INFINIBAND_ALEN) 2329 return -1; 2330 memcpy(eui, dev->dev_addr + 12, 8); 2331 eui[0] |= 2; 2332 return 0; 2333 } 2334 2335 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr) 2336 { 2337 if (addr == 0) 2338 return -1; 2339 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) || 2340 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) || 2341 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) || 2342 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) || 2343 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) || 2344 ipv4_is_lbcast(addr)) ? 
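		/* ISATAP interface identifier (RFC 5214): the universal/local
		 * bit is set only when the embedded IPv4 address is globally
		 * unique.
		 */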
0x00 : 0x02; 2345 eui[1] = 0; 2346 eui[2] = 0x5E; 2347 eui[3] = 0xFE; 2348 memcpy(eui + 4, &addr, 4); 2349 return 0; 2350 } 2351 2352 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev) 2353 { 2354 if (dev->priv_flags & IFF_ISATAP) 2355 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr); 2356 return -1; 2357 } 2358 2359 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev) 2360 { 2361 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr); 2362 } 2363 2364 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev) 2365 { 2366 memcpy(eui, dev->perm_addr, 3); 2367 memcpy(eui + 5, dev->perm_addr + 3, 3); 2368 eui[3] = 0xFF; 2369 eui[4] = 0xFE; 2370 eui[0] ^= 2; 2371 return 0; 2372 } 2373 2374 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) 2375 { 2376 switch (dev->type) { 2377 case ARPHRD_ETHER: 2378 case ARPHRD_FDDI: 2379 return addrconf_ifid_eui48(eui, dev); 2380 case ARPHRD_ARCNET: 2381 return addrconf_ifid_arcnet(eui, dev); 2382 case ARPHRD_INFINIBAND: 2383 return addrconf_ifid_infiniband(eui, dev); 2384 case ARPHRD_SIT: 2385 return addrconf_ifid_sit(eui, dev); 2386 case ARPHRD_IPGRE: 2387 case ARPHRD_TUNNEL: 2388 return addrconf_ifid_gre(eui, dev); 2389 case ARPHRD_6LOWPAN: 2390 return addrconf_ifid_6lowpan(eui, dev); 2391 case ARPHRD_IEEE1394: 2392 return addrconf_ifid_ieee1394(eui, dev); 2393 case ARPHRD_TUNNEL6: 2394 case ARPHRD_IP6GRE: 2395 case ARPHRD_RAWIP: 2396 return addrconf_ifid_ip6tnl(eui, dev); 2397 } 2398 return -1; 2399 } 2400 2401 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev) 2402 { 2403 int err = -1; 2404 struct inet6_ifaddr *ifp; 2405 2406 read_lock_bh(&idev->lock); 2407 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) { 2408 if (ifp->scope > IFA_LINK) 2409 break; 2410 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { 2411 memcpy(eui, ifp->addr.s6_addr+8, 8); 2412 err = 0; 2413 break; 2414 } 2415 } 2416 read_unlock_bh(&idev->lock); 2417 return err; 2418 } 2419 2420 /* Generation of a randomized Interface Identifier 2421 * draft-ietf-6man-rfc4941bis, Section 3.3.1 2422 */ 2423 2424 static void ipv6_gen_rnd_iid(struct in6_addr *addr) 2425 { 2426 regen: 2427 get_random_bytes(&addr->s6_addr[8], 8); 2428 2429 /* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1: 2430 * check if generated address is not inappropriate: 2431 * 2432 * - Reserved IPv6 Interface Identifiers 2433 * - XXX: already assigned to an address on the device 2434 */ 2435 2436 /* Subnet-router anycast: 0000:0000:0000:0000 */ 2437 if (!(addr->s6_addr32[2] | addr->s6_addr32[3])) 2438 goto regen; 2439 2440 /* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212 2441 * Proxy Mobile IPv6: 0200:5EFF:FE00:5213 2442 * IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF 2443 */ 2444 if (ntohl(addr->s6_addr32[2]) == 0x02005eff && 2445 (ntohl(addr->s6_addr32[3]) & 0Xff000000) == 0xfe000000) 2446 goto regen; 2447 2448 /* Reserved subnet anycast addresses */ 2449 if (ntohl(addr->s6_addr32[2]) == 0xfdffffff && 2450 ntohl(addr->s6_addr32[3]) >= 0Xffffff80) 2451 goto regen; 2452 } 2453 2454 /* 2455 * Add prefix route. 2456 */ 2457 2458 static void 2459 addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric, 2460 struct net_device *dev, unsigned long expires, 2461 u32 flags, gfp_t gfp_flags) 2462 { 2463 struct fib6_config cfg = { 2464 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX, 2465 .fc_metric = metric ? 
: IP6_RT_PRIO_ADDRCONF, 2466 .fc_ifindex = dev->ifindex, 2467 .fc_expires = expires, 2468 .fc_dst_len = plen, 2469 .fc_flags = RTF_UP | flags, 2470 .fc_nlinfo.nl_net = dev_net(dev), 2471 .fc_protocol = RTPROT_KERNEL, 2472 .fc_type = RTN_UNICAST, 2473 }; 2474 2475 cfg.fc_dst = *pfx; 2476 2477 /* Prevent useless cloning on PtP SIT. 2478 This thing is done here expecting that the whole 2479 class of non-broadcast devices need not cloning. 2480 */ 2481 #if IS_ENABLED(CONFIG_IPV6_SIT) 2482 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT)) 2483 cfg.fc_flags |= RTF_NONEXTHOP; 2484 #endif 2485 2486 ip6_route_add(&cfg, gfp_flags, NULL); 2487 } 2488 2489 2490 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, 2491 int plen, 2492 const struct net_device *dev, 2493 u32 flags, u32 noflags, 2494 bool no_gw) 2495 { 2496 struct fib6_node *fn; 2497 struct fib6_info *rt = NULL; 2498 struct fib6_table *table; 2499 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX; 2500 2501 table = fib6_get_table(dev_net(dev), tb_id); 2502 if (!table) 2503 return NULL; 2504 2505 rcu_read_lock(); 2506 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true); 2507 if (!fn) 2508 goto out; 2509 2510 for_each_fib6_node_rt_rcu(fn) { 2511 /* prefix routes only use builtin fib6_nh */ 2512 if (rt->nh) 2513 continue; 2514 2515 if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex) 2516 continue; 2517 if (no_gw && rt->fib6_nh->fib_nh_gw_family) 2518 continue; 2519 if ((rt->fib6_flags & flags) != flags) 2520 continue; 2521 if ((rt->fib6_flags & noflags) != 0) 2522 continue; 2523 if (!fib6_info_hold_safe(rt)) 2524 continue; 2525 break; 2526 } 2527 out: 2528 rcu_read_unlock(); 2529 return rt; 2530 } 2531 2532 2533 /* Create "default" multicast route to the interface */ 2534 2535 static void addrconf_add_mroute(struct net_device *dev) 2536 { 2537 struct fib6_config cfg = { 2538 .fc_table = l3mdev_fib_table(dev) ? 
: RT6_TABLE_LOCAL, 2539 .fc_metric = IP6_RT_PRIO_ADDRCONF, 2540 .fc_ifindex = dev->ifindex, 2541 .fc_dst_len = 8, 2542 .fc_flags = RTF_UP, 2543 .fc_type = RTN_MULTICAST, 2544 .fc_nlinfo.nl_net = dev_net(dev), 2545 .fc_protocol = RTPROT_KERNEL, 2546 }; 2547 2548 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); 2549 2550 ip6_route_add(&cfg, GFP_KERNEL, NULL); 2551 } 2552 2553 static struct inet6_dev *addrconf_add_dev(struct net_device *dev) 2554 { 2555 struct inet6_dev *idev; 2556 2557 ASSERT_RTNL(); 2558 2559 idev = ipv6_find_idev(dev); 2560 if (IS_ERR(idev)) 2561 return idev; 2562 2563 if (idev->cnf.disable_ipv6) 2564 return ERR_PTR(-EACCES); 2565 2566 /* Add default multicast route */ 2567 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev)) 2568 addrconf_add_mroute(dev); 2569 2570 return idev; 2571 } 2572 2573 static void manage_tempaddrs(struct inet6_dev *idev, 2574 struct inet6_ifaddr *ifp, 2575 __u32 valid_lft, __u32 prefered_lft, 2576 bool create, unsigned long now) 2577 { 2578 u32 flags; 2579 struct inet6_ifaddr *ift; 2580 2581 read_lock_bh(&idev->lock); 2582 /* update all temporary addresses in the list */ 2583 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) { 2584 int age, max_valid, max_prefered; 2585 2586 if (ifp != ift->ifpub) 2587 continue; 2588 2589 /* RFC 4941 section 3.3: 2590 * If a received option will extend the lifetime of a public 2591 * address, the lifetimes of temporary addresses should 2592 * be extended, subject to the overall constraint that no 2593 * temporary addresses should ever remain "valid" or "preferred" 2594 * for a time longer than (TEMP_VALID_LIFETIME) or 2595 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively. 2596 */ 2597 age = (now - ift->cstamp) / HZ; 2598 max_valid = READ_ONCE(idev->cnf.temp_valid_lft) - age; 2599 if (max_valid < 0) 2600 max_valid = 0; 2601 2602 max_prefered = READ_ONCE(idev->cnf.temp_prefered_lft) - 2603 idev->desync_factor - age; 2604 if (max_prefered < 0) 2605 max_prefered = 0; 2606 2607 if (valid_lft > max_valid) 2608 valid_lft = max_valid; 2609 2610 if (prefered_lft > max_prefered) 2611 prefered_lft = max_prefered; 2612 2613 spin_lock(&ift->lock); 2614 flags = ift->flags; 2615 ift->valid_lft = valid_lft; 2616 ift->prefered_lft = prefered_lft; 2617 ift->tstamp = now; 2618 if (prefered_lft > 0) 2619 ift->flags &= ~IFA_F_DEPRECATED; 2620 2621 spin_unlock(&ift->lock); 2622 if (!(flags&IFA_F_TENTATIVE)) 2623 ipv6_ifa_notify(0, ift); 2624 } 2625 2626 /* Also create a temporary address if it's enabled but no temporary 2627 * address currently exists. 2628 * However, we get called with valid_lft == 0, prefered_lft == 0, create == false 2629 * as part of cleanup (ie. deleting the mngtmpaddr). 2630 * We don't want that to result in creating a new temporary ip address. 2631 */ 2632 if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft)) 2633 create = true; 2634 2635 if (create && READ_ONCE(idev->cnf.use_tempaddr) > 0) { 2636 /* When a new public address is created as described 2637 * in [ADDRCONF], also create a new temporary address. 
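		 * idev->lock is dropped before ipv6_create_tempaddr() runs,
		 * so the temporary address is set up outside of the
		 * read-side critical section.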
2638 */ 2639 read_unlock_bh(&idev->lock); 2640 ipv6_create_tempaddr(ifp, false); 2641 } else { 2642 read_unlock_bh(&idev->lock); 2643 } 2644 } 2645 2646 static bool is_addr_mode_generate_stable(struct inet6_dev *idev) 2647 { 2648 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY || 2649 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM; 2650 } 2651 2652 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, 2653 const struct prefix_info *pinfo, 2654 struct inet6_dev *in6_dev, 2655 const struct in6_addr *addr, int addr_type, 2656 u32 addr_flags, bool sllao, bool tokenized, 2657 __u32 valid_lft, u32 prefered_lft) 2658 { 2659 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1); 2660 int create = 0, update_lft = 0; 2661 2662 if (!ifp && valid_lft) { 2663 int max_addresses = READ_ONCE(in6_dev->cnf.max_addresses); 2664 struct ifa6_config cfg = { 2665 .pfx = addr, 2666 .plen = pinfo->prefix_len, 2667 .ifa_flags = addr_flags, 2668 .valid_lft = valid_lft, 2669 .preferred_lft = prefered_lft, 2670 .scope = addr_type & IPV6_ADDR_SCOPE_MASK, 2671 .ifa_proto = IFAPROT_KERNEL_RA 2672 }; 2673 2674 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 2675 if ((READ_ONCE(net->ipv6.devconf_all->optimistic_dad) || 2676 READ_ONCE(in6_dev->cnf.optimistic_dad)) && 2677 !net->ipv6.devconf_all->forwarding && sllao) 2678 cfg.ifa_flags |= IFA_F_OPTIMISTIC; 2679 #endif 2680 2681 /* Do not allow to create too much of autoconfigured 2682 * addresses; this would be too easy way to crash kernel. 2683 */ 2684 if (!max_addresses || 2685 ipv6_count_addresses(in6_dev) < max_addresses) 2686 ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL); 2687 2688 if (IS_ERR_OR_NULL(ifp)) 2689 return -1; 2690 2691 create = 1; 2692 spin_lock_bh(&ifp->lock); 2693 ifp->flags |= IFA_F_MANAGETEMPADDR; 2694 ifp->cstamp = jiffies; 2695 ifp->tokenized = tokenized; 2696 spin_unlock_bh(&ifp->lock); 2697 addrconf_dad_start(ifp); 2698 } 2699 2700 if (ifp) { 2701 u32 flags; 2702 unsigned long now; 2703 u32 stored_lft; 2704 2705 /* update lifetime (RFC2462 5.5.3 e) */ 2706 spin_lock_bh(&ifp->lock); 2707 now = jiffies; 2708 if (ifp->valid_lft > (now - ifp->tstamp) / HZ) 2709 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ; 2710 else 2711 stored_lft = 0; 2712 2713 /* RFC4862 Section 5.5.3e: 2714 * "Note that the preferred lifetime of the 2715 * corresponding address is always reset to 2716 * the Preferred Lifetime in the received 2717 * Prefix Information option, regardless of 2718 * whether the valid lifetime is also reset or 2719 * ignored." 2720 * 2721 * So we should always update prefered_lft here. 
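		 * The valid lifetime below is treated differently: unless
		 * ra_honor_pio_life is set, it is never lowered below the
		 * smaller of the stored remaining lifetime and
		 * MIN_VALID_LIFETIME, i.e. the RFC 4862 two-hour rule (an RA
		 * trying to drop a 9000s lifetime to 10s only lowers it to
		 * 7200s).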
2722 */ 2723 update_lft = !create && stored_lft; 2724 2725 if (update_lft && !READ_ONCE(in6_dev->cnf.ra_honor_pio_life)) { 2726 const u32 minimum_lft = min_t(u32, 2727 stored_lft, MIN_VALID_LIFETIME); 2728 valid_lft = max(valid_lft, minimum_lft); 2729 } 2730 2731 if (update_lft) { 2732 ifp->valid_lft = valid_lft; 2733 ifp->prefered_lft = prefered_lft; 2734 WRITE_ONCE(ifp->tstamp, now); 2735 flags = ifp->flags; 2736 ifp->flags &= ~IFA_F_DEPRECATED; 2737 spin_unlock_bh(&ifp->lock); 2738 2739 if (!(flags&IFA_F_TENTATIVE)) 2740 ipv6_ifa_notify(0, ifp); 2741 } else 2742 spin_unlock_bh(&ifp->lock); 2743 2744 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft, 2745 create, now); 2746 2747 in6_ifa_put(ifp); 2748 addrconf_verify(net); 2749 } 2750 2751 return 0; 2752 } 2753 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr); 2754 2755 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) 2756 { 2757 struct prefix_info *pinfo; 2758 struct fib6_table *table; 2759 __u32 valid_lft; 2760 __u32 prefered_lft; 2761 int addr_type, err; 2762 u32 addr_flags = 0; 2763 struct inet6_dev *in6_dev; 2764 struct net *net = dev_net(dev); 2765 2766 pinfo = (struct prefix_info *) opt; 2767 2768 if (len < sizeof(struct prefix_info)) { 2769 netdev_dbg(dev, "addrconf: prefix option too short\n"); 2770 return; 2771 } 2772 2773 /* 2774 * Validation checks ([ADDRCONF], page 19) 2775 */ 2776 2777 addr_type = ipv6_addr_type(&pinfo->prefix); 2778 2779 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)) 2780 return; 2781 2782 valid_lft = ntohl(pinfo->valid); 2783 prefered_lft = ntohl(pinfo->prefered); 2784 2785 if (prefered_lft > valid_lft) { 2786 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n"); 2787 return; 2788 } 2789 2790 in6_dev = in6_dev_get(dev); 2791 2792 if (!in6_dev) { 2793 net_dbg_ratelimited("addrconf: device %s not configured\n", 2794 dev->name); 2795 return; 2796 } 2797 2798 if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft) 2799 goto put; 2800 2801 /* 2802 * Two things going on here: 2803 * 1) Add routes for on-link prefixes 2804 * 2) Configure prefixes with the auto flag set 2805 */ 2806 2807 if (pinfo->onlink) { 2808 struct fib6_info *rt; 2809 unsigned long rt_expires; 2810 2811 /* Avoid arithmetic overflow. Really, we could 2812 * save rt_expires in seconds, likely valid_lft, 2813 * but it would require division in fib gc, that it 2814 * not good. 
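		 * The conversion below therefore goes through
		 * addrconf_timeout_fixup() with the larger of HZ and USER_HZ
		 * as the unit, and the result is scaled to jiffies only when
		 * addrconf_finite_timeout() reports a finite lifetime.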
2815 */ 2816 if (HZ > USER_HZ) 2817 rt_expires = addrconf_timeout_fixup(valid_lft, HZ); 2818 else 2819 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ); 2820 2821 if (addrconf_finite_timeout(rt_expires)) 2822 rt_expires *= HZ; 2823 2824 rt = addrconf_get_prefix_route(&pinfo->prefix, 2825 pinfo->prefix_len, 2826 dev, 2827 RTF_ADDRCONF | RTF_PREFIX_RT, 2828 RTF_DEFAULT, true); 2829 2830 if (rt) { 2831 /* Autoconf prefix route */ 2832 if (valid_lft == 0) { 2833 ip6_del_rt(net, rt, false); 2834 rt = NULL; 2835 } else { 2836 table = rt->fib6_table; 2837 spin_lock_bh(&table->tb6_lock); 2838 2839 if (addrconf_finite_timeout(rt_expires)) { 2840 /* not infinity */ 2841 fib6_set_expires(rt, jiffies + rt_expires); 2842 fib6_add_gc_list(rt); 2843 } else { 2844 fib6_clean_expires(rt); 2845 fib6_remove_gc_list(rt); 2846 } 2847 2848 spin_unlock_bh(&table->tb6_lock); 2849 } 2850 } else if (valid_lft) { 2851 clock_t expires = 0; 2852 int flags = RTF_ADDRCONF | RTF_PREFIX_RT; 2853 if (addrconf_finite_timeout(rt_expires)) { 2854 /* not infinity */ 2855 flags |= RTF_EXPIRES; 2856 expires = jiffies_to_clock_t(rt_expires); 2857 } 2858 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len, 2859 0, dev, expires, flags, 2860 GFP_ATOMIC); 2861 } 2862 fib6_info_release(rt); 2863 } 2864 2865 /* Try to figure out our local address for this prefix */ 2866 2867 if (pinfo->autoconf && in6_dev->cnf.autoconf) { 2868 struct in6_addr addr; 2869 bool tokenized = false, dev_addr_generated = false; 2870 2871 if (pinfo->prefix_len == 64) { 2872 memcpy(&addr, &pinfo->prefix, 8); 2873 2874 if (!ipv6_addr_any(&in6_dev->token)) { 2875 read_lock_bh(&in6_dev->lock); 2876 memcpy(addr.s6_addr + 8, 2877 in6_dev->token.s6_addr + 8, 8); 2878 read_unlock_bh(&in6_dev->lock); 2879 tokenized = true; 2880 } else if (is_addr_mode_generate_stable(in6_dev) && 2881 !ipv6_generate_stable_address(&addr, 0, 2882 in6_dev)) { 2883 addr_flags |= IFA_F_STABLE_PRIVACY; 2884 goto ok; 2885 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) && 2886 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) { 2887 goto put; 2888 } else { 2889 dev_addr_generated = true; 2890 } 2891 goto ok; 2892 } 2893 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n", 2894 pinfo->prefix_len); 2895 goto put; 2896 2897 ok: 2898 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, 2899 &addr, addr_type, 2900 addr_flags, sllao, 2901 tokenized, valid_lft, 2902 prefered_lft); 2903 if (err) 2904 goto put; 2905 2906 /* Ignore error case here because previous prefix add addr was 2907 * successful which will be notified. 
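		 * ndisc_ops_prefix_rcv_add_addr() lets device-type specific
		 * ndisc_ops derive further addresses from the same prefix
		 * information option; dev_addr_generated tells them whether
		 * the interface identifier above came from the device
		 * (EUI-64) rather than a token or stable-privacy secret.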
2908 */ 2909 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr, 2910 addr_type, addr_flags, sllao, 2911 tokenized, valid_lft, 2912 prefered_lft, 2913 dev_addr_generated); 2914 } 2915 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo); 2916 put: 2917 in6_dev_put(in6_dev); 2918 } 2919 2920 static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev, 2921 struct in6_ifreq *ireq) 2922 { 2923 struct ip_tunnel_parm_kern p = { }; 2924 int err; 2925 2926 if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4)) 2927 return -EADDRNOTAVAIL; 2928 2929 p.iph.daddr = ireq->ifr6_addr.s6_addr32[3]; 2930 p.iph.version = 4; 2931 p.iph.ihl = 5; 2932 p.iph.protocol = IPPROTO_IPV6; 2933 p.iph.ttl = 64; 2934 2935 if (!dev->netdev_ops->ndo_tunnel_ctl) 2936 return -EOPNOTSUPP; 2937 err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL); 2938 if (err) 2939 return err; 2940 2941 dev = __dev_get_by_name(net, p.name); 2942 if (!dev) 2943 return -ENOBUFS; 2944 return dev_open(dev, NULL); 2945 } 2946 2947 /* 2948 * Set destination address. 2949 * Special case for SIT interfaces where we create a new "virtual" 2950 * device. 2951 */ 2952 int addrconf_set_dstaddr(struct net *net, void __user *arg) 2953 { 2954 struct net_device *dev; 2955 struct in6_ifreq ireq; 2956 int err = -ENODEV; 2957 2958 if (!IS_ENABLED(CONFIG_IPV6_SIT)) 2959 return -ENODEV; 2960 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) 2961 return -EFAULT; 2962 2963 rtnl_lock(); 2964 dev = __dev_get_by_index(net, ireq.ifr6_ifindex); 2965 if (dev && dev->type == ARPHRD_SIT) 2966 err = addrconf_set_sit_dstaddr(net, dev, &ireq); 2967 rtnl_unlock(); 2968 return err; 2969 } 2970 2971 static int ipv6_mc_config(struct sock *sk, bool join, 2972 const struct in6_addr *addr, int ifindex) 2973 { 2974 int ret; 2975 2976 ASSERT_RTNL(); 2977 2978 lock_sock(sk); 2979 if (join) 2980 ret = ipv6_sock_mc_join(sk, ifindex, addr); 2981 else 2982 ret = ipv6_sock_mc_drop(sk, ifindex, addr); 2983 release_sock(sk); 2984 2985 return ret; 2986 } 2987 2988 /* 2989 * Manual configuration of address on an interface 2990 */ 2991 static int inet6_addr_add(struct net *net, int ifindex, 2992 struct ifa6_config *cfg, 2993 struct netlink_ext_ack *extack) 2994 { 2995 struct inet6_ifaddr *ifp; 2996 struct inet6_dev *idev; 2997 struct net_device *dev; 2998 unsigned long timeout; 2999 clock_t expires; 3000 u32 flags; 3001 3002 ASSERT_RTNL(); 3003 3004 if (cfg->plen > 128) { 3005 NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length"); 3006 return -EINVAL; 3007 } 3008 3009 /* check the lifetime */ 3010 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft) { 3011 NL_SET_ERR_MSG_MOD(extack, "address lifetime invalid"); 3012 return -EINVAL; 3013 } 3014 3015 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64) { 3016 NL_SET_ERR_MSG_MOD(extack, "address with \"mngtmpaddr\" flag must have a prefix length of 64"); 3017 return -EINVAL; 3018 } 3019 3020 dev = __dev_get_by_index(net, ifindex); 3021 if (!dev) 3022 return -ENODEV; 3023 3024 idev = addrconf_add_dev(dev); 3025 if (IS_ERR(idev)) { 3026 NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device"); 3027 return PTR_ERR(idev); 3028 } 3029 3030 if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) { 3031 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk, 3032 true, cfg->pfx, ifindex); 3033 3034 if (ret < 0) { 3035 NL_SET_ERR_MSG_MOD(extack, "Multicast auto join failed"); 3036 return ret; 3037 } 3038 } 3039 3040 cfg->scope = ipv6_addr_scope(cfg->pfx); 3041 3042 timeout = 
addrconf_timeout_fixup(cfg->valid_lft, HZ); 3043 if (addrconf_finite_timeout(timeout)) { 3044 expires = jiffies_to_clock_t(timeout * HZ); 3045 cfg->valid_lft = timeout; 3046 flags = RTF_EXPIRES; 3047 } else { 3048 expires = 0; 3049 flags = 0; 3050 cfg->ifa_flags |= IFA_F_PERMANENT; 3051 } 3052 3053 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ); 3054 if (addrconf_finite_timeout(timeout)) { 3055 if (timeout == 0) 3056 cfg->ifa_flags |= IFA_F_DEPRECATED; 3057 cfg->preferred_lft = timeout; 3058 } 3059 3060 ifp = ipv6_add_addr(idev, cfg, true, extack); 3061 if (!IS_ERR(ifp)) { 3062 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) { 3063 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 3064 ifp->rt_priority, dev, expires, 3065 flags, GFP_KERNEL); 3066 } 3067 3068 /* Send a netlink notification if DAD is enabled and 3069 * optimistic flag is not set 3070 */ 3071 if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD))) 3072 ipv6_ifa_notify(0, ifp); 3073 /* 3074 * Note that section 3.1 of RFC 4429 indicates 3075 * that the Optimistic flag should not be set for 3076 * manually configured addresses 3077 */ 3078 addrconf_dad_start(ifp); 3079 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR) 3080 manage_tempaddrs(idev, ifp, cfg->valid_lft, 3081 cfg->preferred_lft, true, jiffies); 3082 in6_ifa_put(ifp); 3083 addrconf_verify_rtnl(net); 3084 return 0; 3085 } else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) { 3086 ipv6_mc_config(net->ipv6.mc_autojoin_sk, false, 3087 cfg->pfx, ifindex); 3088 } 3089 3090 return PTR_ERR(ifp); 3091 } 3092 3093 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags, 3094 const struct in6_addr *pfx, unsigned int plen, 3095 struct netlink_ext_ack *extack) 3096 { 3097 struct inet6_ifaddr *ifp; 3098 struct inet6_dev *idev; 3099 struct net_device *dev; 3100 3101 if (plen > 128) { 3102 NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length"); 3103 return -EINVAL; 3104 } 3105 3106 dev = __dev_get_by_index(net, ifindex); 3107 if (!dev) { 3108 NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface"); 3109 return -ENODEV; 3110 } 3111 3112 idev = __in6_dev_get(dev); 3113 if (!idev) { 3114 NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device"); 3115 return -ENXIO; 3116 } 3117 3118 read_lock_bh(&idev->lock); 3119 list_for_each_entry(ifp, &idev->addr_list, if_list) { 3120 if (ifp->prefix_len == plen && 3121 ipv6_addr_equal(pfx, &ifp->addr)) { 3122 in6_ifa_hold(ifp); 3123 read_unlock_bh(&idev->lock); 3124 3125 if (!(ifp->flags & IFA_F_TEMPORARY) && 3126 (ifa_flags & IFA_F_MANAGETEMPADDR)) 3127 manage_tempaddrs(idev, ifp, 0, 0, false, 3128 jiffies); 3129 ipv6_del_addr(ifp); 3130 addrconf_verify_rtnl(net); 3131 if (ipv6_addr_is_multicast(pfx)) { 3132 ipv6_mc_config(net->ipv6.mc_autojoin_sk, 3133 false, pfx, dev->ifindex); 3134 } 3135 return 0; 3136 } 3137 } 3138 read_unlock_bh(&idev->lock); 3139 3140 NL_SET_ERR_MSG_MOD(extack, "address not found"); 3141 return -EADDRNOTAVAIL; 3142 } 3143 3144 3145 int addrconf_add_ifaddr(struct net *net, void __user *arg) 3146 { 3147 struct ifa6_config cfg = { 3148 .ifa_flags = IFA_F_PERMANENT, 3149 .preferred_lft = INFINITY_LIFE_TIME, 3150 .valid_lft = INFINITY_LIFE_TIME, 3151 }; 3152 struct in6_ifreq ireq; 3153 int err; 3154 3155 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3156 return -EPERM; 3157 3158 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) 3159 return -EFAULT; 3160 3161 cfg.pfx = &ireq.ifr6_addr; 3162 cfg.plen = ireq.ifr6_prefixlen; 3163 3164 rtnl_lock(); 3165 err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL); 3166 
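	/* inet6_addr_add() drops its reference on the new address before
	 * returning, so only the RTNL needs to be released here.
	 */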
rtnl_unlock(); 3167 return err; 3168 } 3169 3170 int addrconf_del_ifaddr(struct net *net, void __user *arg) 3171 { 3172 struct in6_ifreq ireq; 3173 int err; 3174 3175 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3176 return -EPERM; 3177 3178 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) 3179 return -EFAULT; 3180 3181 rtnl_lock(); 3182 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr, 3183 ireq.ifr6_prefixlen, NULL); 3184 rtnl_unlock(); 3185 return err; 3186 } 3187 3188 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, 3189 int plen, int scope, u8 proto) 3190 { 3191 struct inet6_ifaddr *ifp; 3192 struct ifa6_config cfg = { 3193 .pfx = addr, 3194 .plen = plen, 3195 .ifa_flags = IFA_F_PERMANENT, 3196 .valid_lft = INFINITY_LIFE_TIME, 3197 .preferred_lft = INFINITY_LIFE_TIME, 3198 .scope = scope, 3199 .ifa_proto = proto 3200 }; 3201 3202 ifp = ipv6_add_addr(idev, &cfg, true, NULL); 3203 if (!IS_ERR(ifp)) { 3204 spin_lock_bh(&ifp->lock); 3205 ifp->flags &= ~IFA_F_TENTATIVE; 3206 spin_unlock_bh(&ifp->lock); 3207 rt_genid_bump_ipv6(dev_net(idev->dev)); 3208 ipv6_ifa_notify(RTM_NEWADDR, ifp); 3209 in6_ifa_put(ifp); 3210 } 3211 } 3212 3213 #if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE) 3214 static void add_v4_addrs(struct inet6_dev *idev) 3215 { 3216 struct in6_addr addr; 3217 struct net_device *dev; 3218 struct net *net = dev_net(idev->dev); 3219 int scope, plen, offset = 0; 3220 u32 pflags = 0; 3221 3222 ASSERT_RTNL(); 3223 3224 memset(&addr, 0, sizeof(struct in6_addr)); 3225 /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */ 3226 if (idev->dev->addr_len == sizeof(struct in6_addr)) 3227 offset = sizeof(struct in6_addr) - 4; 3228 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4); 3229 3230 if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) { 3231 scope = IPV6_ADDR_COMPATv4; 3232 plen = 96; 3233 pflags |= RTF_NONEXTHOP; 3234 } else { 3235 if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE) 3236 return; 3237 3238 addr.s6_addr32[0] = htonl(0xfe800000); 3239 scope = IFA_LINK; 3240 plen = 64; 3241 } 3242 3243 if (addr.s6_addr32[3]) { 3244 add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC); 3245 addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, 3246 GFP_KERNEL); 3247 return; 3248 } 3249 3250 for_each_netdev(net, dev) { 3251 struct in_device *in_dev = __in_dev_get_rtnl(dev); 3252 if (in_dev && (dev->flags & IFF_UP)) { 3253 struct in_ifaddr *ifa; 3254 int flag = scope; 3255 3256 in_dev_for_each_ifa_rtnl(ifa, in_dev) { 3257 addr.s6_addr32[3] = ifa->ifa_local; 3258 3259 if (ifa->ifa_scope == RT_SCOPE_LINK) 3260 continue; 3261 if (ifa->ifa_scope >= RT_SCOPE_HOST) { 3262 if (idev->dev->flags&IFF_POINTOPOINT) 3263 continue; 3264 flag |= IFA_HOST; 3265 } 3266 3267 add_addr(idev, &addr, plen, flag, 3268 IFAPROT_UNSPEC); 3269 addrconf_prefix_route(&addr, plen, 0, idev->dev, 3270 0, pflags, GFP_KERNEL); 3271 } 3272 } 3273 } 3274 } 3275 #endif 3276 3277 static void init_loopback(struct net_device *dev) 3278 { 3279 struct inet6_dev *idev; 3280 3281 /* ::1 */ 3282 3283 ASSERT_RTNL(); 3284 3285 idev = ipv6_find_idev(dev); 3286 if (IS_ERR(idev)) { 3287 pr_debug("%s: add_dev failed\n", __func__); 3288 return; 3289 } 3290 3291 add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO); 3292 } 3293 3294 void addrconf_add_linklocal(struct inet6_dev *idev, 3295 const struct in6_addr *addr, u32 flags) 3296 { 3297 struct ifa6_config 
cfg = { 3298 .pfx = addr, 3299 .plen = 64, 3300 .ifa_flags = flags | IFA_F_PERMANENT, 3301 .valid_lft = INFINITY_LIFE_TIME, 3302 .preferred_lft = INFINITY_LIFE_TIME, 3303 .scope = IFA_LINK, 3304 .ifa_proto = IFAPROT_KERNEL_LL 3305 }; 3306 struct inet6_ifaddr *ifp; 3307 3308 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 3309 if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad) || 3310 READ_ONCE(idev->cnf.optimistic_dad)) && 3311 !dev_net(idev->dev)->ipv6.devconf_all->forwarding) 3312 cfg.ifa_flags |= IFA_F_OPTIMISTIC; 3313 #endif 3314 3315 ifp = ipv6_add_addr(idev, &cfg, true, NULL); 3316 if (!IS_ERR(ifp)) { 3317 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev, 3318 0, 0, GFP_ATOMIC); 3319 addrconf_dad_start(ifp); 3320 in6_ifa_put(ifp); 3321 } 3322 } 3323 EXPORT_SYMBOL_GPL(addrconf_add_linklocal); 3324 3325 static bool ipv6_reserved_interfaceid(struct in6_addr address) 3326 { 3327 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0) 3328 return true; 3329 3330 if (address.s6_addr32[2] == htonl(0x02005eff) && 3331 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000))) 3332 return true; 3333 3334 if (address.s6_addr32[2] == htonl(0xfdffffff) && 3335 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80))) 3336 return true; 3337 3338 return false; 3339 } 3340 3341 static int ipv6_generate_stable_address(struct in6_addr *address, 3342 u8 dad_count, 3343 const struct inet6_dev *idev) 3344 { 3345 static DEFINE_SPINLOCK(lock); 3346 static __u32 digest[SHA1_DIGEST_WORDS]; 3347 static __u32 workspace[SHA1_WORKSPACE_WORDS]; 3348 3349 static union { 3350 char __data[SHA1_BLOCK_SIZE]; 3351 struct { 3352 struct in6_addr secret; 3353 __be32 prefix[2]; 3354 unsigned char hwaddr[MAX_ADDR_LEN]; 3355 u8 dad_count; 3356 } __packed; 3357 } data; 3358 3359 struct in6_addr secret; 3360 struct in6_addr temp; 3361 struct net *net = dev_net(idev->dev); 3362 3363 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data)); 3364 3365 if (idev->cnf.stable_secret.initialized) 3366 secret = idev->cnf.stable_secret.secret; 3367 else if (net->ipv6.devconf_dflt->stable_secret.initialized) 3368 secret = net->ipv6.devconf_dflt->stable_secret.secret; 3369 else 3370 return -1; 3371 3372 retry: 3373 spin_lock_bh(&lock); 3374 3375 sha1_init(digest); 3376 memset(&data, 0, sizeof(data)); 3377 memset(workspace, 0, sizeof(workspace)); 3378 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len); 3379 data.prefix[0] = address->s6_addr32[0]; 3380 data.prefix[1] = address->s6_addr32[1]; 3381 data.secret = secret; 3382 data.dad_count = dad_count; 3383 3384 sha1_transform(digest, data.__data, workspace); 3385 3386 temp = *address; 3387 temp.s6_addr32[2] = (__force __be32)digest[0]; 3388 temp.s6_addr32[3] = (__force __be32)digest[1]; 3389 3390 spin_unlock_bh(&lock); 3391 3392 if (ipv6_reserved_interfaceid(temp)) { 3393 dad_count++; 3394 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries) 3395 return -1; 3396 goto retry; 3397 } 3398 3399 *address = temp; 3400 return 0; 3401 } 3402 3403 static void ipv6_gen_mode_random_init(struct inet6_dev *idev) 3404 { 3405 struct ipv6_stable_secret *s = &idev->cnf.stable_secret; 3406 3407 if (s->initialized) 3408 return; 3409 s = &idev->cnf.stable_secret; 3410 get_random_bytes(&s->secret, sizeof(s->secret)); 3411 s->initialized = true; 3412 } 3413 3414 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route) 3415 { 3416 struct in6_addr addr; 3417 3418 /* no link local addresses on L3 master devices */ 3419 if 
(netif_is_l3_master(idev->dev)) 3420 return; 3421 3422 /* no link local addresses on devices flagged as slaves */ 3423 if (idev->dev->priv_flags & IFF_NO_ADDRCONF) 3424 return; 3425 3426 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); 3427 3428 switch (idev->cnf.addr_gen_mode) { 3429 case IN6_ADDR_GEN_MODE_RANDOM: 3430 ipv6_gen_mode_random_init(idev); 3431 fallthrough; 3432 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY: 3433 if (!ipv6_generate_stable_address(&addr, 0, idev)) 3434 addrconf_add_linklocal(idev, &addr, 3435 IFA_F_STABLE_PRIVACY); 3436 else if (prefix_route) 3437 addrconf_prefix_route(&addr, 64, 0, idev->dev, 3438 0, 0, GFP_KERNEL); 3439 break; 3440 case IN6_ADDR_GEN_MODE_EUI64: 3441 /* addrconf_add_linklocal also adds a prefix_route and we 3442 * only need to care about prefix routes if ipv6_generate_eui64 3443 * couldn't generate one. 3444 */ 3445 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0) 3446 addrconf_add_linklocal(idev, &addr, 0); 3447 else if (prefix_route) 3448 addrconf_prefix_route(&addr, 64, 0, idev->dev, 3449 0, 0, GFP_KERNEL); 3450 break; 3451 case IN6_ADDR_GEN_MODE_NONE: 3452 default: 3453 /* will not add any link local address */ 3454 break; 3455 } 3456 } 3457 3458 static void addrconf_dev_config(struct net_device *dev) 3459 { 3460 struct inet6_dev *idev; 3461 3462 ASSERT_RTNL(); 3463 3464 if ((dev->type != ARPHRD_ETHER) && 3465 (dev->type != ARPHRD_FDDI) && 3466 (dev->type != ARPHRD_ARCNET) && 3467 (dev->type != ARPHRD_INFINIBAND) && 3468 (dev->type != ARPHRD_IEEE1394) && 3469 (dev->type != ARPHRD_TUNNEL6) && 3470 (dev->type != ARPHRD_6LOWPAN) && 3471 (dev->type != ARPHRD_TUNNEL) && 3472 (dev->type != ARPHRD_NONE) && 3473 (dev->type != ARPHRD_RAWIP)) { 3474 /* Alas, we support only Ethernet autoconfiguration. */ 3475 idev = __in6_dev_get(dev); 3476 if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP && 3477 dev->flags & IFF_MULTICAST) 3478 ipv6_mc_up(idev); 3479 return; 3480 } 3481 3482 idev = addrconf_add_dev(dev); 3483 if (IS_ERR(idev)) 3484 return; 3485 3486 /* this device type has no EUI support */ 3487 if (dev->type == ARPHRD_NONE && 3488 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) 3489 WRITE_ONCE(idev->cnf.addr_gen_mode, 3490 IN6_ADDR_GEN_MODE_RANDOM); 3491 3492 addrconf_addr_gen(idev, false); 3493 } 3494 3495 #if IS_ENABLED(CONFIG_IPV6_SIT) 3496 static void addrconf_sit_config(struct net_device *dev) 3497 { 3498 struct inet6_dev *idev; 3499 3500 ASSERT_RTNL(); 3501 3502 /* 3503 * Configure the tunnel with one of our IPv4 3504 * addresses... 
we should configure all of 3505 * our v4 addrs in the tunnel 3506 */ 3507 3508 idev = ipv6_find_idev(dev); 3509 if (IS_ERR(idev)) { 3510 pr_debug("%s: add_dev failed\n", __func__); 3511 return; 3512 } 3513 3514 if (dev->priv_flags & IFF_ISATAP) { 3515 addrconf_addr_gen(idev, false); 3516 return; 3517 } 3518 3519 add_v4_addrs(idev); 3520 3521 if (dev->flags&IFF_POINTOPOINT) 3522 addrconf_add_mroute(dev); 3523 } 3524 #endif 3525 3526 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE) 3527 static void addrconf_gre_config(struct net_device *dev) 3528 { 3529 struct inet6_dev *idev; 3530 3531 ASSERT_RTNL(); 3532 3533 idev = ipv6_find_idev(dev); 3534 if (IS_ERR(idev)) { 3535 pr_debug("%s: add_dev failed\n", __func__); 3536 return; 3537 } 3538 3539 if (dev->type == ARPHRD_ETHER) { 3540 addrconf_addr_gen(idev, true); 3541 return; 3542 } 3543 3544 add_v4_addrs(idev); 3545 3546 if (dev->flags & IFF_POINTOPOINT) 3547 addrconf_add_mroute(dev); 3548 } 3549 #endif 3550 3551 static void addrconf_init_auto_addrs(struct net_device *dev) 3552 { 3553 switch (dev->type) { 3554 #if IS_ENABLED(CONFIG_IPV6_SIT) 3555 case ARPHRD_SIT: 3556 addrconf_sit_config(dev); 3557 break; 3558 #endif 3559 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE) 3560 case ARPHRD_IP6GRE: 3561 case ARPHRD_IPGRE: 3562 addrconf_gre_config(dev); 3563 break; 3564 #endif 3565 case ARPHRD_LOOPBACK: 3566 init_loopback(dev); 3567 break; 3568 3569 default: 3570 addrconf_dev_config(dev); 3571 break; 3572 } 3573 } 3574 3575 static int fixup_permanent_addr(struct net *net, 3576 struct inet6_dev *idev, 3577 struct inet6_ifaddr *ifp) 3578 { 3579 /* !fib6_node means the host route was removed from the 3580 * FIB, for example, if 'lo' device is taken down. In that 3581 * case regenerate the host route. 
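	 * The replacement fib6_info is allocated first and swapped in under
	 * ifp->lock, since ifp->rt can be read outside the RTNL; the old
	 * entry, if any, is released afterwards.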
3582 */ 3583 if (!ifp->rt || !ifp->rt->fib6_node) { 3584 struct fib6_info *f6i, *prev; 3585 3586 f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false, 3587 GFP_ATOMIC, NULL); 3588 if (IS_ERR(f6i)) 3589 return PTR_ERR(f6i); 3590 3591 /* ifp->rt can be accessed outside of rtnl */ 3592 spin_lock(&ifp->lock); 3593 prev = ifp->rt; 3594 ifp->rt = f6i; 3595 spin_unlock(&ifp->lock); 3596 3597 fib6_info_release(prev); 3598 } 3599 3600 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) { 3601 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 3602 ifp->rt_priority, idev->dev, 0, 0, 3603 GFP_ATOMIC); 3604 } 3605 3606 if (ifp->state == INET6_IFADDR_STATE_PREDAD) 3607 addrconf_dad_start(ifp); 3608 3609 return 0; 3610 } 3611 3612 static void addrconf_permanent_addr(struct net *net, struct net_device *dev) 3613 { 3614 struct inet6_ifaddr *ifp, *tmp; 3615 struct inet6_dev *idev; 3616 3617 idev = __in6_dev_get(dev); 3618 if (!idev) 3619 return; 3620 3621 write_lock_bh(&idev->lock); 3622 3623 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) { 3624 if ((ifp->flags & IFA_F_PERMANENT) && 3625 fixup_permanent_addr(net, idev, ifp) < 0) { 3626 write_unlock_bh(&idev->lock); 3627 in6_ifa_hold(ifp); 3628 ipv6_del_addr(ifp); 3629 write_lock_bh(&idev->lock); 3630 3631 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n", 3632 idev->dev->name, &ifp->addr); 3633 } 3634 } 3635 3636 write_unlock_bh(&idev->lock); 3637 } 3638 3639 static int addrconf_notify(struct notifier_block *this, unsigned long event, 3640 void *ptr) 3641 { 3642 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3643 struct netdev_notifier_change_info *change_info; 3644 struct netdev_notifier_changeupper_info *info; 3645 struct inet6_dev *idev = __in6_dev_get(dev); 3646 struct net *net = dev_net(dev); 3647 int run_pending = 0; 3648 int err; 3649 3650 switch (event) { 3651 case NETDEV_REGISTER: 3652 if (!idev && dev->mtu >= IPV6_MIN_MTU) { 3653 idev = ipv6_add_dev(dev); 3654 if (IS_ERR(idev)) 3655 return notifier_from_errno(PTR_ERR(idev)); 3656 } 3657 break; 3658 3659 case NETDEV_CHANGEMTU: 3660 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */ 3661 if (dev->mtu < IPV6_MIN_MTU) { 3662 addrconf_ifdown(dev, dev != net->loopback_dev); 3663 break; 3664 } 3665 3666 if (idev) { 3667 rt6_mtu_change(dev, dev->mtu); 3668 WRITE_ONCE(idev->cnf.mtu6, dev->mtu); 3669 break; 3670 } 3671 3672 /* allocate new idev */ 3673 idev = ipv6_add_dev(dev); 3674 if (IS_ERR(idev)) 3675 break; 3676 3677 /* device is still not ready */ 3678 if (!(idev->if_flags & IF_READY)) 3679 break; 3680 3681 run_pending = 1; 3682 fallthrough; 3683 case NETDEV_UP: 3684 case NETDEV_CHANGE: 3685 if (idev && idev->cnf.disable_ipv6) 3686 break; 3687 3688 if (dev->priv_flags & IFF_NO_ADDRCONF) { 3689 if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) && 3690 dev->flags & IFF_UP && dev->flags & IFF_MULTICAST) 3691 ipv6_mc_up(idev); 3692 break; 3693 } 3694 3695 if (event == NETDEV_UP) { 3696 /* restore routes for permanent addresses */ 3697 addrconf_permanent_addr(net, dev); 3698 3699 if (!addrconf_link_ready(dev)) { 3700 /* device is not ready yet. */ 3701 pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n", 3702 dev->name); 3703 break; 3704 } 3705 3706 if (!idev && dev->mtu >= IPV6_MIN_MTU) 3707 idev = ipv6_add_dev(dev); 3708 3709 if (!IS_ERR_OR_NULL(idev)) { 3710 idev->if_flags |= IF_READY; 3711 run_pending = 1; 3712 } 3713 } else if (event == NETDEV_CHANGE) { 3714 if (!addrconf_link_ready(dev)) { 3715 /* device is still not ready. 
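				 * Tell the routing side, so nexthops through
				 * this device are flagged link-down before we
				 * bail out.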
*/ 3716 rt6_sync_down_dev(dev, event); 3717 break; 3718 } 3719 3720 if (!IS_ERR_OR_NULL(idev)) { 3721 if (idev->if_flags & IF_READY) { 3722 /* device is already configured - 3723 * but resend MLD reports, we might 3724 * have roamed and need to update 3725 * multicast snooping switches 3726 */ 3727 ipv6_mc_up(idev); 3728 change_info = ptr; 3729 if (change_info->flags_changed & IFF_NOARP) 3730 addrconf_dad_run(idev, true); 3731 rt6_sync_up(dev, RTNH_F_LINKDOWN); 3732 break; 3733 } 3734 idev->if_flags |= IF_READY; 3735 } 3736 3737 pr_debug("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n", 3738 dev->name); 3739 3740 run_pending = 1; 3741 } 3742 3743 addrconf_init_auto_addrs(dev); 3744 3745 if (!IS_ERR_OR_NULL(idev)) { 3746 if (run_pending) 3747 addrconf_dad_run(idev, false); 3748 3749 /* Device has an address by now */ 3750 rt6_sync_up(dev, RTNH_F_DEAD); 3751 3752 /* 3753 * If the MTU changed during the interface down, 3754 * when the interface up, the changed MTU must be 3755 * reflected in the idev as well as routers. 3756 */ 3757 if (idev->cnf.mtu6 != dev->mtu && 3758 dev->mtu >= IPV6_MIN_MTU) { 3759 rt6_mtu_change(dev, dev->mtu); 3760 WRITE_ONCE(idev->cnf.mtu6, dev->mtu); 3761 } 3762 WRITE_ONCE(idev->tstamp, jiffies); 3763 inet6_ifinfo_notify(RTM_NEWLINK, idev); 3764 3765 /* 3766 * If the changed mtu during down is lower than 3767 * IPV6_MIN_MTU stop IPv6 on this interface. 3768 */ 3769 if (dev->mtu < IPV6_MIN_MTU) 3770 addrconf_ifdown(dev, dev != net->loopback_dev); 3771 } 3772 break; 3773 3774 case NETDEV_DOWN: 3775 case NETDEV_UNREGISTER: 3776 /* 3777 * Remove all addresses from this interface. 3778 */ 3779 addrconf_ifdown(dev, event != NETDEV_DOWN); 3780 break; 3781 3782 case NETDEV_CHANGENAME: 3783 if (idev) { 3784 snmp6_unregister_dev(idev); 3785 addrconf_sysctl_unregister(idev); 3786 err = addrconf_sysctl_register(idev); 3787 if (err) 3788 return notifier_from_errno(err); 3789 err = snmp6_register_dev(idev); 3790 if (err) { 3791 addrconf_sysctl_unregister(idev); 3792 return notifier_from_errno(err); 3793 } 3794 } 3795 break; 3796 3797 case NETDEV_PRE_TYPE_CHANGE: 3798 case NETDEV_POST_TYPE_CHANGE: 3799 if (idev) 3800 addrconf_type_change(dev, event); 3801 break; 3802 3803 case NETDEV_CHANGEUPPER: 3804 info = ptr; 3805 3806 /* flush all routes if dev is linked to or unlinked from 3807 * an L3 master device (e.g., VRF) 3808 */ 3809 if (info->upper_dev && netif_is_l3_master(info->upper_dev)) 3810 addrconf_ifdown(dev, false); 3811 } 3812 3813 return NOTIFY_OK; 3814 } 3815 3816 /* 3817 * addrconf module should be notified of a device going up 3818 */ 3819 static struct notifier_block ipv6_dev_notf = { 3820 .notifier_call = addrconf_notify, 3821 .priority = ADDRCONF_NOTIFY_PRIORITY, 3822 }; 3823 3824 static void addrconf_type_change(struct net_device *dev, unsigned long event) 3825 { 3826 struct inet6_dev *idev; 3827 ASSERT_RTNL(); 3828 3829 idev = __in6_dev_get(dev); 3830 3831 if (event == NETDEV_POST_TYPE_CHANGE) 3832 ipv6_mc_remap(idev); 3833 else if (event == NETDEV_PRE_TYPE_CHANGE) 3834 ipv6_mc_unmap(idev); 3835 } 3836 3837 static bool addr_is_local(const struct in6_addr *addr) 3838 { 3839 return ipv6_addr_type(addr) & 3840 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); 3841 } 3842 3843 static int addrconf_ifdown(struct net_device *dev, bool unregister) 3844 { 3845 unsigned long event = unregister ? 
NETDEV_UNREGISTER : NETDEV_DOWN; 3846 struct net *net = dev_net(dev); 3847 struct inet6_dev *idev; 3848 struct inet6_ifaddr *ifa; 3849 LIST_HEAD(tmp_addr_list); 3850 bool keep_addr = false; 3851 bool was_ready; 3852 int state, i; 3853 3854 ASSERT_RTNL(); 3855 3856 rt6_disable_ip(dev, event); 3857 3858 idev = __in6_dev_get(dev); 3859 if (!idev) 3860 return -ENODEV; 3861 3862 /* 3863 * Step 1: remove reference to ipv6 device from parent device. 3864 * Do not dev_put! 3865 */ 3866 if (unregister) { 3867 idev->dead = 1; 3868 3869 /* protected by rtnl_lock */ 3870 RCU_INIT_POINTER(dev->ip6_ptr, NULL); 3871 3872 /* Step 1.5: remove snmp6 entry */ 3873 snmp6_unregister_dev(idev); 3874 3875 } 3876 3877 /* combine the user config with event to determine if permanent 3878 * addresses are to be removed from address hash table 3879 */ 3880 if (!unregister && !idev->cnf.disable_ipv6) { 3881 /* aggregate the system setting and interface setting */ 3882 int _keep_addr = READ_ONCE(net->ipv6.devconf_all->keep_addr_on_down); 3883 3884 if (!_keep_addr) 3885 _keep_addr = READ_ONCE(idev->cnf.keep_addr_on_down); 3886 3887 keep_addr = (_keep_addr > 0); 3888 } 3889 3890 /* Step 2: clear hash table */ 3891 for (i = 0; i < IN6_ADDR_HSIZE; i++) { 3892 struct hlist_head *h = &net->ipv6.inet6_addr_lst[i]; 3893 3894 spin_lock_bh(&net->ipv6.addrconf_hash_lock); 3895 restart: 3896 hlist_for_each_entry_rcu(ifa, h, addr_lst) { 3897 if (ifa->idev == idev) { 3898 addrconf_del_dad_work(ifa); 3899 /* combined flag + permanent flag decide if 3900 * address is retained on a down event 3901 */ 3902 if (!keep_addr || 3903 !(ifa->flags & IFA_F_PERMANENT) || 3904 addr_is_local(&ifa->addr)) { 3905 hlist_del_init_rcu(&ifa->addr_lst); 3906 goto restart; 3907 } 3908 } 3909 } 3910 spin_unlock_bh(&net->ipv6.addrconf_hash_lock); 3911 } 3912 3913 write_lock_bh(&idev->lock); 3914 3915 addrconf_del_rs_timer(idev); 3916 3917 /* Step 2: clear flags for stateless addrconf, repeated down 3918 * detection 3919 */ 3920 was_ready = idev->if_flags & IF_READY; 3921 if (!unregister) 3922 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); 3923 3924 /* Step 3: clear tempaddr list */ 3925 while (!list_empty(&idev->tempaddr_list)) { 3926 ifa = list_first_entry(&idev->tempaddr_list, 3927 struct inet6_ifaddr, tmp_list); 3928 list_del(&ifa->tmp_list); 3929 write_unlock_bh(&idev->lock); 3930 spin_lock_bh(&ifa->lock); 3931 3932 if (ifa->ifpub) { 3933 in6_ifa_put(ifa->ifpub); 3934 ifa->ifpub = NULL; 3935 } 3936 spin_unlock_bh(&ifa->lock); 3937 in6_ifa_put(ifa); 3938 write_lock_bh(&idev->lock); 3939 } 3940 3941 list_for_each_entry(ifa, &idev->addr_list, if_list) 3942 list_add_tail(&ifa->if_list_aux, &tmp_addr_list); 3943 write_unlock_bh(&idev->lock); 3944 3945 while (!list_empty(&tmp_addr_list)) { 3946 struct fib6_info *rt = NULL; 3947 bool keep; 3948 3949 ifa = list_first_entry(&tmp_addr_list, 3950 struct inet6_ifaddr, if_list_aux); 3951 list_del(&ifa->if_list_aux); 3952 3953 addrconf_del_dad_work(ifa); 3954 3955 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) && 3956 !addr_is_local(&ifa->addr); 3957 3958 spin_lock_bh(&ifa->lock); 3959 3960 if (keep) { 3961 /* set state to skip the notifier below */ 3962 state = INET6_IFADDR_STATE_DEAD; 3963 ifa->state = INET6_IFADDR_STATE_PREDAD; 3964 if (!(ifa->flags & IFA_F_NODAD)) 3965 ifa->flags |= IFA_F_TENTATIVE; 3966 3967 rt = ifa->rt; 3968 ifa->rt = NULL; 3969 } else { 3970 state = ifa->state; 3971 ifa->state = INET6_IFADDR_STATE_DEAD; 3972 } 3973 3974 spin_unlock_bh(&ifa->lock); 3975 3976 if (rt) 3977 
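			/* drop the host route that was detached from the
			 * kept address above
			 */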
ip6_del_rt(net, rt, false); 3978 3979 if (state != INET6_IFADDR_STATE_DEAD) { 3980 __ipv6_ifa_notify(RTM_DELADDR, ifa); 3981 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); 3982 } else { 3983 if (idev->cnf.forwarding) 3984 addrconf_leave_anycast(ifa); 3985 addrconf_leave_solict(ifa->idev, &ifa->addr); 3986 } 3987 3988 if (!keep) { 3989 write_lock_bh(&idev->lock); 3990 list_del_rcu(&ifa->if_list); 3991 write_unlock_bh(&idev->lock); 3992 in6_ifa_put(ifa); 3993 } 3994 } 3995 3996 /* Step 5: Discard anycast and multicast list */ 3997 if (unregister) { 3998 ipv6_ac_destroy_dev(idev); 3999 ipv6_mc_destroy_dev(idev); 4000 } else if (was_ready) { 4001 ipv6_mc_down(idev); 4002 } 4003 4004 WRITE_ONCE(idev->tstamp, jiffies); 4005 idev->ra_mtu = 0; 4006 4007 /* Last: Shot the device (if unregistered) */ 4008 if (unregister) { 4009 addrconf_sysctl_unregister(idev); 4010 neigh_parms_release(&nd_tbl, idev->nd_parms); 4011 neigh_ifdown(&nd_tbl, dev); 4012 in6_dev_put(idev); 4013 } 4014 return 0; 4015 } 4016 4017 static void addrconf_rs_timer(struct timer_list *t) 4018 { 4019 struct inet6_dev *idev = from_timer(idev, t, rs_timer); 4020 struct net_device *dev = idev->dev; 4021 struct in6_addr lladdr; 4022 int rtr_solicits; 4023 4024 write_lock(&idev->lock); 4025 if (idev->dead || !(idev->if_flags & IF_READY)) 4026 goto out; 4027 4028 if (!ipv6_accept_ra(idev)) 4029 goto out; 4030 4031 /* Announcement received after solicitation was sent */ 4032 if (idev->if_flags & IF_RA_RCVD) 4033 goto out; 4034 4035 rtr_solicits = READ_ONCE(idev->cnf.rtr_solicits); 4036 4037 if (idev->rs_probes++ < rtr_solicits || rtr_solicits < 0) { 4038 write_unlock(&idev->lock); 4039 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) 4040 ndisc_send_rs(dev, &lladdr, 4041 &in6addr_linklocal_allrouters); 4042 else 4043 goto put; 4044 4045 write_lock(&idev->lock); 4046 idev->rs_interval = rfc3315_s14_backoff_update( 4047 idev->rs_interval, 4048 READ_ONCE(idev->cnf.rtr_solicit_max_interval)); 4049 /* The wait after the last probe can be shorter */ 4050 addrconf_mod_rs_timer(idev, (idev->rs_probes == 4051 READ_ONCE(idev->cnf.rtr_solicits)) ? 4052 READ_ONCE(idev->cnf.rtr_solicit_delay) : 4053 idev->rs_interval); 4054 } else { 4055 /* 4056 * Note: we do not support deprecated "all on-link" 4057 * assumption any longer. 4058 */ 4059 pr_debug("%s: no IPv6 routers present\n", idev->dev->name); 4060 } 4061 4062 out: 4063 write_unlock(&idev->lock); 4064 put: 4065 in6_dev_put(idev); 4066 } 4067 4068 /* 4069 * Duplicate Address Detection 4070 */ 4071 static void addrconf_dad_kick(struct inet6_ifaddr *ifp) 4072 { 4073 struct inet6_dev *idev = ifp->idev; 4074 unsigned long rand_num; 4075 u64 nonce; 4076 4077 if (ifp->flags & IFA_F_OPTIMISTIC) 4078 rand_num = 0; 4079 else 4080 rand_num = get_random_u32_below( 4081 READ_ONCE(idev->cnf.rtr_solicit_delay) ? 
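			/* fall back to 1 so the bound passed to
			 * get_random_u32_below() is never zero
			 */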
: 1); 4082 4083 nonce = 0; 4084 if (READ_ONCE(idev->cnf.enhanced_dad) || 4085 READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad)) { 4086 do 4087 get_random_bytes(&nonce, 6); 4088 while (nonce == 0); 4089 } 4090 ifp->dad_nonce = nonce; 4091 ifp->dad_probes = READ_ONCE(idev->cnf.dad_transmits); 4092 addrconf_mod_dad_work(ifp, rand_num); 4093 } 4094 4095 static void addrconf_dad_begin(struct inet6_ifaddr *ifp) 4096 { 4097 struct inet6_dev *idev = ifp->idev; 4098 struct net_device *dev = idev->dev; 4099 bool bump_id, notify = false; 4100 struct net *net; 4101 4102 addrconf_join_solict(dev, &ifp->addr); 4103 4104 read_lock_bh(&idev->lock); 4105 spin_lock(&ifp->lock); 4106 if (ifp->state == INET6_IFADDR_STATE_DEAD) 4107 goto out; 4108 4109 net = dev_net(dev); 4110 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || 4111 (READ_ONCE(net->ipv6.devconf_all->accept_dad) < 1 && 4112 READ_ONCE(idev->cnf.accept_dad) < 1) || 4113 !(ifp->flags&IFA_F_TENTATIVE) || 4114 ifp->flags & IFA_F_NODAD) { 4115 bool send_na = false; 4116 4117 if (ifp->flags & IFA_F_TENTATIVE && 4118 !(ifp->flags & IFA_F_OPTIMISTIC)) 4119 send_na = true; 4120 bump_id = ifp->flags & IFA_F_TENTATIVE; 4121 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 4122 spin_unlock(&ifp->lock); 4123 read_unlock_bh(&idev->lock); 4124 4125 addrconf_dad_completed(ifp, bump_id, send_na); 4126 return; 4127 } 4128 4129 if (!(idev->if_flags & IF_READY)) { 4130 spin_unlock(&ifp->lock); 4131 read_unlock_bh(&idev->lock); 4132 /* 4133 * If the device is not ready: 4134 * - keep it tentative if it is a permanent address. 4135 * - otherwise, kill it. 4136 */ 4137 in6_ifa_hold(ifp); 4138 addrconf_dad_stop(ifp, 0); 4139 return; 4140 } 4141 4142 /* 4143 * Optimistic nodes can start receiving 4144 * Frames right away 4145 */ 4146 if (ifp->flags & IFA_F_OPTIMISTIC) { 4147 ip6_ins_rt(net, ifp->rt); 4148 if (ipv6_use_optimistic_addr(net, idev)) { 4149 /* Because optimistic nodes can use this address, 4150 * notify listeners. If DAD fails, RTM_DELADDR is sent. 
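			 * The RTM_NEWADDR itself is deferred until ifp->lock
			 * and idev->lock have been dropped, via the notify
			 * flag checked at the end of this function.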
4151 */ 4152 notify = true; 4153 } 4154 } 4155 4156 addrconf_dad_kick(ifp); 4157 out: 4158 spin_unlock(&ifp->lock); 4159 read_unlock_bh(&idev->lock); 4160 if (notify) 4161 ipv6_ifa_notify(RTM_NEWADDR, ifp); 4162 } 4163 4164 static void addrconf_dad_start(struct inet6_ifaddr *ifp) 4165 { 4166 bool begin_dad = false; 4167 4168 spin_lock_bh(&ifp->lock); 4169 if (ifp->state != INET6_IFADDR_STATE_DEAD) { 4170 ifp->state = INET6_IFADDR_STATE_PREDAD; 4171 begin_dad = true; 4172 } 4173 spin_unlock_bh(&ifp->lock); 4174 4175 if (begin_dad) 4176 addrconf_mod_dad_work(ifp, 0); 4177 } 4178 4179 static void addrconf_dad_work(struct work_struct *w) 4180 { 4181 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w), 4182 struct inet6_ifaddr, 4183 dad_work); 4184 struct inet6_dev *idev = ifp->idev; 4185 bool bump_id, disable_ipv6 = false; 4186 struct in6_addr mcaddr; 4187 4188 enum { 4189 DAD_PROCESS, 4190 DAD_BEGIN, 4191 DAD_ABORT, 4192 } action = DAD_PROCESS; 4193 4194 rtnl_lock(); 4195 4196 spin_lock_bh(&ifp->lock); 4197 if (ifp->state == INET6_IFADDR_STATE_PREDAD) { 4198 action = DAD_BEGIN; 4199 ifp->state = INET6_IFADDR_STATE_DAD; 4200 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) { 4201 action = DAD_ABORT; 4202 ifp->state = INET6_IFADDR_STATE_POSTDAD; 4203 4204 if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->accept_dad) > 1 || 4205 READ_ONCE(idev->cnf.accept_dad) > 1) && 4206 !idev->cnf.disable_ipv6 && 4207 !(ifp->flags & IFA_F_STABLE_PRIVACY)) { 4208 struct in6_addr addr; 4209 4210 addr.s6_addr32[0] = htonl(0xfe800000); 4211 addr.s6_addr32[1] = 0; 4212 4213 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) && 4214 ipv6_addr_equal(&ifp->addr, &addr)) { 4215 /* DAD failed for link-local based on MAC */ 4216 WRITE_ONCE(idev->cnf.disable_ipv6, 1); 4217 4218 pr_info("%s: IPv6 being disabled!\n", 4219 ifp->idev->dev->name); 4220 disable_ipv6 = true; 4221 } 4222 } 4223 } 4224 spin_unlock_bh(&ifp->lock); 4225 4226 if (action == DAD_BEGIN) { 4227 addrconf_dad_begin(ifp); 4228 goto out; 4229 } else if (action == DAD_ABORT) { 4230 in6_ifa_hold(ifp); 4231 addrconf_dad_stop(ifp, 1); 4232 if (disable_ipv6) 4233 addrconf_ifdown(idev->dev, false); 4234 goto out; 4235 } 4236 4237 if (!ifp->dad_probes && addrconf_dad_end(ifp)) 4238 goto out; 4239 4240 write_lock_bh(&idev->lock); 4241 if (idev->dead || !(idev->if_flags & IF_READY)) { 4242 write_unlock_bh(&idev->lock); 4243 goto out; 4244 } 4245 4246 spin_lock(&ifp->lock); 4247 if (ifp->state == INET6_IFADDR_STATE_DEAD) { 4248 spin_unlock(&ifp->lock); 4249 write_unlock_bh(&idev->lock); 4250 goto out; 4251 } 4252 4253 if (ifp->dad_probes == 0) { 4254 bool send_na = false; 4255 4256 /* 4257 * DAD was successful 4258 */ 4259 4260 if (ifp->flags & IFA_F_TENTATIVE && 4261 !(ifp->flags & IFA_F_OPTIMISTIC)) 4262 send_na = true; 4263 bump_id = ifp->flags & IFA_F_TENTATIVE; 4264 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 4265 spin_unlock(&ifp->lock); 4266 write_unlock_bh(&idev->lock); 4267 4268 addrconf_dad_completed(ifp, bump_id, send_na); 4269 4270 goto out; 4271 } 4272 4273 ifp->dad_probes--; 4274 addrconf_mod_dad_work(ifp, 4275 max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), 4276 HZ/100)); 4277 spin_unlock(&ifp->lock); 4278 write_unlock_bh(&idev->lock); 4279 4280 /* send a neighbour solicitation for our addr */ 4281 addrconf_addr_solict_mult(&ifp->addr, &mcaddr); 4282 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, 4283 ifp->dad_nonce); 4284 out: 4285 in6_ifa_put(ifp); 4286 rtnl_unlock(); 4287 } 4288 4289 /* 
ifp->idev must be at least read locked */
static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
{
	struct inet6_ifaddr *ifpiter;
	struct inet6_dev *idev = ifp->idev;

	list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
		if (ifpiter->scope > IFA_LINK)
			break;
		if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
		    (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
				       IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
		    IFA_F_PERMANENT)
			return false;
	}
	return true;
}

static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
				   bool send_na)
{
	struct net_device *dev = ifp->idev->dev;
	struct in6_addr lladdr;
	bool send_rs, send_mld;

	addrconf_del_dad_work(ifp);

	/*
	 * Configure the address for reception. Now it is valid.
	 */

	ipv6_ifa_notify(RTM_NEWADDR, ifp);

	/* If the added prefix is link-local and we are prepared to process
	 * router advertisements, start sending router solicitations.
	 */

	read_lock_bh(&ifp->idev->lock);
	send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
	send_rs = send_mld &&
		  ipv6_accept_ra(ifp->idev) &&
		  READ_ONCE(ifp->idev->cnf.rtr_solicits) != 0 &&
		  (dev->flags & IFF_LOOPBACK) == 0 &&
		  (dev->type != ARPHRD_TUNNEL) &&
		  !netif_is_team_port(dev);
	read_unlock_bh(&ifp->idev->lock);

	/* While DAD is in progress, the MLD report's source address is the
	 * unspecified address (::). Resend it with the proper link-local
	 * address now.
	 */
	if (send_mld)
		ipv6_mc_dad_complete(ifp->idev);

	/* send unsolicited NA if enabled */
	if (send_na &&
	    (READ_ONCE(ifp->idev->cnf.ndisc_notify) ||
	     READ_ONCE(dev_net(dev)->ipv6.devconf_all->ndisc_notify))) {
		ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
			      /*router=*/ !!ifp->idev->cnf.forwarding,
			      /*solicited=*/ false, /*override=*/ true,
			      /*inc_opt=*/ true);
	}

	if (send_rs) {
		/*
		 * If a host has already performed a random delay
		 * [...] as part of DAD [...] there is no need
		 * to delay again before sending the first RS
		 */
		if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
			return;
		ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);

		write_lock_bh(&ifp->idev->lock);
		spin_lock(&ifp->lock);
		ifp->idev->rs_interval = rfc3315_s14_backoff_init(
			READ_ONCE(ifp->idev->cnf.rtr_solicit_interval));
		ifp->idev->rs_probes = 1;
		ifp->idev->if_flags |= IF_RS_SENT;
		addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
		spin_unlock(&ifp->lock);
		write_unlock_bh(&ifp->idev->lock);
	}

	if (bump_id)
		rt_genid_bump_ipv6(dev_net(dev));

	/* Make sure that a new temporary address will be created
	 * before this temporary address becomes deprecated.
4378 */ 4379 if (ifp->flags & IFA_F_TEMPORARY) 4380 addrconf_verify_rtnl(dev_net(dev)); 4381 } 4382 4383 static void addrconf_dad_run(struct inet6_dev *idev, bool restart) 4384 { 4385 struct inet6_ifaddr *ifp; 4386 4387 read_lock_bh(&idev->lock); 4388 list_for_each_entry(ifp, &idev->addr_list, if_list) { 4389 spin_lock(&ifp->lock); 4390 if ((ifp->flags & IFA_F_TENTATIVE && 4391 ifp->state == INET6_IFADDR_STATE_DAD) || restart) { 4392 if (restart) 4393 ifp->state = INET6_IFADDR_STATE_PREDAD; 4394 addrconf_dad_kick(ifp); 4395 } 4396 spin_unlock(&ifp->lock); 4397 } 4398 read_unlock_bh(&idev->lock); 4399 } 4400 4401 #ifdef CONFIG_PROC_FS 4402 struct if6_iter_state { 4403 struct seq_net_private p; 4404 int bucket; 4405 int offset; 4406 }; 4407 4408 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos) 4409 { 4410 struct if6_iter_state *state = seq->private; 4411 struct net *net = seq_file_net(seq); 4412 struct inet6_ifaddr *ifa = NULL; 4413 int p = 0; 4414 4415 /* initial bucket if pos is 0 */ 4416 if (pos == 0) { 4417 state->bucket = 0; 4418 state->offset = 0; 4419 } 4420 4421 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { 4422 hlist_for_each_entry_rcu(ifa, &net->ipv6.inet6_addr_lst[state->bucket], 4423 addr_lst) { 4424 /* sync with offset */ 4425 if (p < state->offset) { 4426 p++; 4427 continue; 4428 } 4429 return ifa; 4430 } 4431 4432 /* prepare for next bucket */ 4433 state->offset = 0; 4434 p = 0; 4435 } 4436 return NULL; 4437 } 4438 4439 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, 4440 struct inet6_ifaddr *ifa) 4441 { 4442 struct if6_iter_state *state = seq->private; 4443 struct net *net = seq_file_net(seq); 4444 4445 hlist_for_each_entry_continue_rcu(ifa, addr_lst) { 4446 state->offset++; 4447 return ifa; 4448 } 4449 4450 state->offset = 0; 4451 while (++state->bucket < IN6_ADDR_HSIZE) { 4452 hlist_for_each_entry_rcu(ifa, 4453 &net->ipv6.inet6_addr_lst[state->bucket], addr_lst) { 4454 return ifa; 4455 } 4456 } 4457 4458 return NULL; 4459 } 4460 4461 static void *if6_seq_start(struct seq_file *seq, loff_t *pos) 4462 __acquires(rcu) 4463 { 4464 rcu_read_lock(); 4465 return if6_get_first(seq, *pos); 4466 } 4467 4468 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4469 { 4470 struct inet6_ifaddr *ifa; 4471 4472 ifa = if6_get_next(seq, v); 4473 ++*pos; 4474 return ifa; 4475 } 4476 4477 static void if6_seq_stop(struct seq_file *seq, void *v) 4478 __releases(rcu) 4479 { 4480 rcu_read_unlock(); 4481 } 4482 4483 static int if6_seq_show(struct seq_file *seq, void *v) 4484 { 4485 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v; 4486 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n", 4487 &ifp->addr, 4488 ifp->idev->dev->ifindex, 4489 ifp->prefix_len, 4490 ifp->scope, 4491 (u8) ifp->flags, 4492 ifp->idev->dev->name); 4493 return 0; 4494 } 4495 4496 static const struct seq_operations if6_seq_ops = { 4497 .start = if6_seq_start, 4498 .next = if6_seq_next, 4499 .show = if6_seq_show, 4500 .stop = if6_seq_stop, 4501 }; 4502 4503 static int __net_init if6_proc_net_init(struct net *net) 4504 { 4505 if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops, 4506 sizeof(struct if6_iter_state))) 4507 return -ENOMEM; 4508 return 0; 4509 } 4510 4511 static void __net_exit if6_proc_net_exit(struct net *net) 4512 { 4513 remove_proc_entry("if_inet6", net->proc_net); 4514 } 4515 4516 static struct pernet_operations if6_proc_net_ops = { 4517 .init = if6_proc_net_init, 4518 .exit = if6_proc_net_exit, 4519 }; 4520 4521 int __init 
if6_proc_init(void)
{
	return register_pernet_subsys(&if6_proc_net_ops);
}

void if6_proc_exit(void)
{
	unregister_pernet_subsys(&if6_proc_net_ops);
}
#endif	/* CONFIG_PROC_FS */

#if IS_ENABLED(CONFIG_IPV6_MIP6)
/* Check if address is a home address configured on any interface. */
int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
{
	unsigned int hash = inet6_addr_hash(net, addr);
	struct inet6_ifaddr *ifp = NULL;
	int ret = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
		if (ipv6_addr_equal(&ifp->addr, addr) &&
		    (ifp->flags & IFA_F_HOMEADDRESS)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
#endif

/* RFC 6554 requires a router to detect loops in segment routing by
 * checking whether the segment list contains any of its own interface
 * addresses more than once.
 *
 * Paraphrasing the RFC:
 *
 * To detect loops in the SRH, a router MUST determine if the SRH
 * includes multiple addresses assigned to any interface on that router.
 * A loop exists if such addresses appear more than once and are
 * separated by at least one address not assigned to that router.
 */
int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
			  unsigned char nsegs)
{
	const struct in6_addr *addr;
	int i, ret = 0, found = 0;
	struct inet6_ifaddr *ifp;
	bool separated = false;
	unsigned int hash;
	bool hash_found;

	rcu_read_lock();
	for (i = 0; i < nsegs; i++) {
		addr = &segs[i];
		hash = inet6_addr_hash(net, addr);

		hash_found = false;
		hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
			if (ipv6_addr_equal(&ifp->addr, addr)) {
				hash_found = true;
				break;
			}
		}

		if (hash_found) {
			if (found > 1 && separated) {
				ret = 1;
				break;
			}

			separated = false;
			found++;
		} else {
			separated = true;
		}
	}
	rcu_read_unlock();

	return ret;
}

/*
 *	Periodic address status verification
 */

static void addrconf_verify_rtnl(struct net *net)
{
	unsigned long now, next, next_sec, next_sched;
	struct inet6_ifaddr *ifp;
	int i;

	ASSERT_RTNL();

	rcu_read_lock_bh();
	now = jiffies;
	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);

	cancel_delayed_work(&net->ipv6.addr_chk_work);

	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
restart:
		hlist_for_each_entry_rcu_bh(ifp, &net->ipv6.inet6_addr_lst[i], addr_lst) {
			unsigned long age;

			/* A permanent address never expires, but its
			 * preferred_lft may have been set to a finite,
			 * non-zero value while valid_lft stays infinite;
			 * such an address still needs lifetime processing.
			 * Only skip permanent addresses whose preferred
			 * lifetime really is infinite.
			 */
			if ((ifp->flags & IFA_F_PERMANENT) &&
			    (ifp->prefered_lft == INFINITY_LIFE_TIME))
				continue;

			spin_lock(&ifp->lock);
			/* We try to batch several events at once. */
			age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;

			if ((ifp->flags&IFA_F_TEMPORARY) &&
			    !(ifp->flags&IFA_F_TENTATIVE) &&
			    ifp->prefered_lft != INFINITY_LIFE_TIME &&
			    !ifp->regen_count && ifp->ifpub) {
				/* This is a non-regenerated temporary addr.
*/ 4644 4645 unsigned long regen_advance = ipv6_get_regen_advance(ifp->idev); 4646 4647 if (age + regen_advance >= ifp->prefered_lft) { 4648 struct inet6_ifaddr *ifpub = ifp->ifpub; 4649 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) 4650 next = ifp->tstamp + ifp->prefered_lft * HZ; 4651 4652 ifp->regen_count++; 4653 in6_ifa_hold(ifp); 4654 in6_ifa_hold(ifpub); 4655 spin_unlock(&ifp->lock); 4656 4657 spin_lock(&ifpub->lock); 4658 ifpub->regen_count = 0; 4659 spin_unlock(&ifpub->lock); 4660 rcu_read_unlock_bh(); 4661 ipv6_create_tempaddr(ifpub, true); 4662 in6_ifa_put(ifpub); 4663 in6_ifa_put(ifp); 4664 rcu_read_lock_bh(); 4665 goto restart; 4666 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next)) 4667 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ; 4668 } 4669 4670 if (ifp->valid_lft != INFINITY_LIFE_TIME && 4671 age >= ifp->valid_lft) { 4672 spin_unlock(&ifp->lock); 4673 in6_ifa_hold(ifp); 4674 rcu_read_unlock_bh(); 4675 ipv6_del_addr(ifp); 4676 rcu_read_lock_bh(); 4677 goto restart; 4678 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { 4679 spin_unlock(&ifp->lock); 4680 continue; 4681 } else if (age >= ifp->prefered_lft) { 4682 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */ 4683 int deprecate = 0; 4684 4685 if (!(ifp->flags&IFA_F_DEPRECATED)) { 4686 deprecate = 1; 4687 ifp->flags |= IFA_F_DEPRECATED; 4688 } 4689 4690 if ((ifp->valid_lft != INFINITY_LIFE_TIME) && 4691 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))) 4692 next = ifp->tstamp + ifp->valid_lft * HZ; 4693 4694 spin_unlock(&ifp->lock); 4695 4696 if (deprecate) { 4697 in6_ifa_hold(ifp); 4698 4699 ipv6_ifa_notify(0, ifp); 4700 in6_ifa_put(ifp); 4701 goto restart; 4702 } 4703 } else { 4704 /* ifp->prefered_lft <= ifp->valid_lft */ 4705 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) 4706 next = ifp->tstamp + ifp->prefered_lft * HZ; 4707 spin_unlock(&ifp->lock); 4708 } 4709 } 4710 } 4711 4712 next_sec = round_jiffies_up(next); 4713 next_sched = next; 4714 4715 /* If rounded timeout is accurate enough, accept it. */ 4716 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ)) 4717 next_sched = next_sec; 4718 4719 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. 
*/ 4720 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX)) 4721 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX; 4722 4723 pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", 4724 now, next, next_sec, next_sched); 4725 mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, next_sched - now); 4726 rcu_read_unlock_bh(); 4727 } 4728 4729 static void addrconf_verify_work(struct work_struct *w) 4730 { 4731 struct net *net = container_of(to_delayed_work(w), struct net, 4732 ipv6.addr_chk_work); 4733 4734 rtnl_lock(); 4735 addrconf_verify_rtnl(net); 4736 rtnl_unlock(); 4737 } 4738 4739 static void addrconf_verify(struct net *net) 4740 { 4741 mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, 0); 4742 } 4743 4744 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local, 4745 struct in6_addr **peer_pfx) 4746 { 4747 struct in6_addr *pfx = NULL; 4748 4749 *peer_pfx = NULL; 4750 4751 if (addr) 4752 pfx = nla_data(addr); 4753 4754 if (local) { 4755 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx))) 4756 *peer_pfx = pfx; 4757 pfx = nla_data(local); 4758 } 4759 4760 return pfx; 4761 } 4762 4763 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = { 4764 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) }, 4765 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) }, 4766 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) }, 4767 [IFA_FLAGS] = { .len = sizeof(u32) }, 4768 [IFA_RT_PRIORITY] = { .len = sizeof(u32) }, 4769 [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, 4770 [IFA_PROTO] = { .type = NLA_U8 }, 4771 }; 4772 4773 static int 4774 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, 4775 struct netlink_ext_ack *extack) 4776 { 4777 struct net *net = sock_net(skb->sk); 4778 struct ifaddrmsg *ifm; 4779 struct nlattr *tb[IFA_MAX+1]; 4780 struct in6_addr *pfx, *peer_pfx; 4781 u32 ifa_flags; 4782 int err; 4783 4784 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, 4785 ifa_ipv6_policy, extack); 4786 if (err < 0) 4787 return err; 4788 4789 ifm = nlmsg_data(nlh); 4790 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx); 4791 if (!pfx) 4792 return -EINVAL; 4793 4794 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags; 4795 4796 /* We ignore other flags so far. */ 4797 ifa_flags &= IFA_F_MANAGETEMPADDR; 4798 4799 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx, 4800 ifm->ifa_prefixlen, extack); 4801 } 4802 4803 static int modify_prefix_route(struct inet6_ifaddr *ifp, 4804 unsigned long expires, u32 flags, 4805 bool modify_peer) 4806 { 4807 struct fib6_table *table; 4808 struct fib6_info *f6i; 4809 u32 prio; 4810 4811 f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr, 4812 ifp->prefix_len, 4813 ifp->idev->dev, 0, RTF_DEFAULT, true); 4814 if (!f6i) 4815 return -ENOENT; 4816 4817 prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF; 4818 if (f6i->fib6_metric != prio) { 4819 /* delete old one */ 4820 ip6_del_rt(dev_net(ifp->idev->dev), f6i, false); 4821 4822 /* add new one */ 4823 addrconf_prefix_route(modify_peer ? 
&ifp->peer_addr : &ifp->addr, 4824 ifp->prefix_len, 4825 ifp->rt_priority, ifp->idev->dev, 4826 expires, flags, GFP_KERNEL); 4827 } else { 4828 table = f6i->fib6_table; 4829 spin_lock_bh(&table->tb6_lock); 4830 4831 if (!(flags & RTF_EXPIRES)) { 4832 fib6_clean_expires(f6i); 4833 fib6_remove_gc_list(f6i); 4834 } else { 4835 fib6_set_expires(f6i, expires); 4836 fib6_add_gc_list(f6i); 4837 } 4838 4839 spin_unlock_bh(&table->tb6_lock); 4840 4841 fib6_info_release(f6i); 4842 } 4843 4844 return 0; 4845 } 4846 4847 static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp, 4848 struct ifa6_config *cfg) 4849 { 4850 u32 flags; 4851 clock_t expires; 4852 unsigned long timeout; 4853 bool was_managetempaddr; 4854 bool had_prefixroute; 4855 bool new_peer = false; 4856 4857 ASSERT_RTNL(); 4858 4859 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft) 4860 return -EINVAL; 4861 4862 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && 4863 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64)) 4864 return -EINVAL; 4865 4866 if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED) 4867 cfg->ifa_flags &= ~IFA_F_OPTIMISTIC; 4868 4869 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ); 4870 if (addrconf_finite_timeout(timeout)) { 4871 expires = jiffies_to_clock_t(timeout * HZ); 4872 cfg->valid_lft = timeout; 4873 flags = RTF_EXPIRES; 4874 } else { 4875 expires = 0; 4876 flags = 0; 4877 cfg->ifa_flags |= IFA_F_PERMANENT; 4878 } 4879 4880 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ); 4881 if (addrconf_finite_timeout(timeout)) { 4882 if (timeout == 0) 4883 cfg->ifa_flags |= IFA_F_DEPRECATED; 4884 cfg->preferred_lft = timeout; 4885 } 4886 4887 if (cfg->peer_pfx && 4888 memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) { 4889 if (!ipv6_addr_any(&ifp->peer_addr)) 4890 cleanup_prefix_route(ifp, expires, true, true); 4891 new_peer = true; 4892 } 4893 4894 spin_lock_bh(&ifp->lock); 4895 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR; 4896 had_prefixroute = ifp->flags & IFA_F_PERMANENT && 4897 !(ifp->flags & IFA_F_NOPREFIXROUTE); 4898 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | 4899 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR | 4900 IFA_F_NOPREFIXROUTE); 4901 ifp->flags |= cfg->ifa_flags; 4902 WRITE_ONCE(ifp->tstamp, jiffies); 4903 WRITE_ONCE(ifp->valid_lft, cfg->valid_lft); 4904 WRITE_ONCE(ifp->prefered_lft, cfg->preferred_lft); 4905 WRITE_ONCE(ifp->ifa_proto, cfg->ifa_proto); 4906 4907 if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority) 4908 WRITE_ONCE(ifp->rt_priority, cfg->rt_priority); 4909 4910 if (new_peer) 4911 ifp->peer_addr = *cfg->peer_pfx; 4912 4913 spin_unlock_bh(&ifp->lock); 4914 if (!(ifp->flags&IFA_F_TENTATIVE)) 4915 ipv6_ifa_notify(0, ifp); 4916 4917 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) { 4918 int rc = -ENOENT; 4919 4920 if (had_prefixroute) 4921 rc = modify_prefix_route(ifp, expires, flags, false); 4922 4923 /* prefix route could have been deleted; if so restore it */ 4924 if (rc == -ENOENT) { 4925 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 4926 ifp->rt_priority, ifp->idev->dev, 4927 expires, flags, GFP_KERNEL); 4928 } 4929 4930 if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr)) 4931 rc = modify_prefix_route(ifp, expires, flags, true); 4932 4933 if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) { 4934 addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len, 4935 ifp->rt_priority, ifp->idev->dev, 4936 expires, flags, GFP_KERNEL); 4937 } 4938 } else if (had_prefixroute) { 4939 enum 
cleanup_prefix_rt_t action; 4940 unsigned long rt_expires; 4941 4942 write_lock_bh(&ifp->idev->lock); 4943 action = check_cleanup_prefix_route(ifp, &rt_expires); 4944 write_unlock_bh(&ifp->idev->lock); 4945 4946 if (action != CLEANUP_PREFIX_RT_NOP) { 4947 cleanup_prefix_route(ifp, rt_expires, 4948 action == CLEANUP_PREFIX_RT_DEL, false); 4949 } 4950 } 4951 4952 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) { 4953 if (was_managetempaddr && 4954 !(ifp->flags & IFA_F_MANAGETEMPADDR)) { 4955 cfg->valid_lft = 0; 4956 cfg->preferred_lft = 0; 4957 } 4958 manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft, 4959 cfg->preferred_lft, !was_managetempaddr, 4960 jiffies); 4961 } 4962 4963 addrconf_verify_rtnl(net); 4964 4965 return 0; 4966 } 4967 4968 static int 4969 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, 4970 struct netlink_ext_ack *extack) 4971 { 4972 struct net *net = sock_net(skb->sk); 4973 struct ifaddrmsg *ifm; 4974 struct nlattr *tb[IFA_MAX+1]; 4975 struct in6_addr *peer_pfx; 4976 struct inet6_ifaddr *ifa; 4977 struct net_device *dev; 4978 struct inet6_dev *idev; 4979 struct ifa6_config cfg; 4980 int err; 4981 4982 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, 4983 ifa_ipv6_policy, extack); 4984 if (err < 0) 4985 return err; 4986 4987 memset(&cfg, 0, sizeof(cfg)); 4988 4989 ifm = nlmsg_data(nlh); 4990 cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx); 4991 if (!cfg.pfx) 4992 return -EINVAL; 4993 4994 cfg.peer_pfx = peer_pfx; 4995 cfg.plen = ifm->ifa_prefixlen; 4996 if (tb[IFA_RT_PRIORITY]) 4997 cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]); 4998 4999 if (tb[IFA_PROTO]) 5000 cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]); 5001 5002 cfg.valid_lft = INFINITY_LIFE_TIME; 5003 cfg.preferred_lft = INFINITY_LIFE_TIME; 5004 5005 if (tb[IFA_CACHEINFO]) { 5006 struct ifa_cacheinfo *ci; 5007 5008 ci = nla_data(tb[IFA_CACHEINFO]); 5009 cfg.valid_lft = ci->ifa_valid; 5010 cfg.preferred_lft = ci->ifa_prefered; 5011 } 5012 5013 dev = __dev_get_by_index(net, ifm->ifa_index); 5014 if (!dev) { 5015 NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface"); 5016 return -ENODEV; 5017 } 5018 5019 if (tb[IFA_FLAGS]) 5020 cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]); 5021 else 5022 cfg.ifa_flags = ifm->ifa_flags; 5023 5024 /* We ignore other flags so far. */ 5025 cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | 5026 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE | 5027 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC; 5028 5029 idev = ipv6_find_idev(dev); 5030 if (IS_ERR(idev)) 5031 return PTR_ERR(idev); 5032 5033 if (!ipv6_allow_optimistic_dad(net, idev)) 5034 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC; 5035 5036 if (cfg.ifa_flags & IFA_F_NODAD && 5037 cfg.ifa_flags & IFA_F_OPTIMISTIC) { 5038 NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive"); 5039 return -EINVAL; 5040 } 5041 5042 ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1); 5043 if (!ifa) { 5044 /* 5045 * It would be best to check for !NLM_F_CREATE here but 5046 * userspace already relies on not having to provide this. 
5047 */ 5048 return inet6_addr_add(net, ifm->ifa_index, &cfg, extack); 5049 } 5050 5051 if (nlh->nlmsg_flags & NLM_F_EXCL || 5052 !(nlh->nlmsg_flags & NLM_F_REPLACE)) { 5053 NL_SET_ERR_MSG_MOD(extack, "address already assigned"); 5054 err = -EEXIST; 5055 } else { 5056 err = inet6_addr_modify(net, ifa, &cfg); 5057 } 5058 5059 in6_ifa_put(ifa); 5060 5061 return err; 5062 } 5063 5064 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags, 5065 u8 scope, int ifindex) 5066 { 5067 struct ifaddrmsg *ifm; 5068 5069 ifm = nlmsg_data(nlh); 5070 ifm->ifa_family = AF_INET6; 5071 ifm->ifa_prefixlen = prefixlen; 5072 ifm->ifa_flags = flags; 5073 ifm->ifa_scope = scope; 5074 ifm->ifa_index = ifindex; 5075 } 5076 5077 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp, 5078 unsigned long tstamp, u32 preferred, u32 valid) 5079 { 5080 struct ifa_cacheinfo ci; 5081 5082 ci.cstamp = cstamp_delta(cstamp); 5083 ci.tstamp = cstamp_delta(tstamp); 5084 ci.ifa_prefered = preferred; 5085 ci.ifa_valid = valid; 5086 5087 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci); 5088 } 5089 5090 static inline int rt_scope(int ifa_scope) 5091 { 5092 if (ifa_scope & IFA_HOST) 5093 return RT_SCOPE_HOST; 5094 else if (ifa_scope & IFA_LINK) 5095 return RT_SCOPE_LINK; 5096 else if (ifa_scope & IFA_SITE) 5097 return RT_SCOPE_SITE; 5098 else 5099 return RT_SCOPE_UNIVERSE; 5100 } 5101 5102 static inline int inet6_ifaddr_msgsize(void) 5103 { 5104 return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) 5105 + nla_total_size(16) /* IFA_LOCAL */ 5106 + nla_total_size(16) /* IFA_ADDRESS */ 5107 + nla_total_size(sizeof(struct ifa_cacheinfo)) 5108 + nla_total_size(4) /* IFA_FLAGS */ 5109 + nla_total_size(1) /* IFA_PROTO */ 5110 + nla_total_size(4) /* IFA_RT_PRIORITY */; 5111 } 5112 5113 enum addr_type_t { 5114 UNICAST_ADDR, 5115 MULTICAST_ADDR, 5116 ANYCAST_ADDR, 5117 }; 5118 5119 struct inet6_fill_args { 5120 u32 portid; 5121 u32 seq; 5122 int event; 5123 unsigned int flags; 5124 int netnsid; 5125 int ifindex; 5126 enum addr_type_t type; 5127 }; 5128 5129 static int inet6_fill_ifaddr(struct sk_buff *skb, 5130 const struct inet6_ifaddr *ifa, 5131 struct inet6_fill_args *args) 5132 { 5133 struct nlmsghdr *nlh; 5134 u32 preferred, valid; 5135 u32 flags, priority; 5136 u8 proto; 5137 5138 nlh = nlmsg_put(skb, args->portid, args->seq, args->event, 5139 sizeof(struct ifaddrmsg), args->flags); 5140 if (!nlh) 5141 return -EMSGSIZE; 5142 5143 flags = READ_ONCE(ifa->flags); 5144 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), 5145 ifa->idev->dev->ifindex); 5146 5147 if (args->netnsid >= 0 && 5148 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) 5149 goto error; 5150 5151 preferred = READ_ONCE(ifa->prefered_lft); 5152 valid = READ_ONCE(ifa->valid_lft); 5153 5154 if (!((flags & IFA_F_PERMANENT) && 5155 (preferred == INFINITY_LIFE_TIME))) { 5156 if (preferred != INFINITY_LIFE_TIME) { 5157 long tval = (jiffies - READ_ONCE(ifa->tstamp)) / HZ; 5158 5159 if (preferred > tval) 5160 preferred -= tval; 5161 else 5162 preferred = 0; 5163 if (valid != INFINITY_LIFE_TIME) { 5164 if (valid > tval) 5165 valid -= tval; 5166 else 5167 valid = 0; 5168 } 5169 } 5170 } else { 5171 preferred = INFINITY_LIFE_TIME; 5172 valid = INFINITY_LIFE_TIME; 5173 } 5174 5175 if (!ipv6_addr_any(&ifa->peer_addr)) { 5176 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 || 5177 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0) 5178 goto error; 5179 } else { 5180 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0) 5181 goto 
error; 5182 } 5183 5184 priority = READ_ONCE(ifa->rt_priority); 5185 if (priority && nla_put_u32(skb, IFA_RT_PRIORITY, priority)) 5186 goto error; 5187 5188 if (put_cacheinfo(skb, ifa->cstamp, READ_ONCE(ifa->tstamp), 5189 preferred, valid) < 0) 5190 goto error; 5191 5192 if (nla_put_u32(skb, IFA_FLAGS, flags) < 0) 5193 goto error; 5194 5195 proto = READ_ONCE(ifa->ifa_proto); 5196 if (proto && nla_put_u8(skb, IFA_PROTO, proto)) 5197 goto error; 5198 5199 nlmsg_end(skb, nlh); 5200 return 0; 5201 5202 error: 5203 nlmsg_cancel(skb, nlh); 5204 return -EMSGSIZE; 5205 } 5206 5207 static int inet6_fill_ifmcaddr(struct sk_buff *skb, 5208 const struct ifmcaddr6 *ifmca, 5209 struct inet6_fill_args *args) 5210 { 5211 int ifindex = ifmca->idev->dev->ifindex; 5212 u8 scope = RT_SCOPE_UNIVERSE; 5213 struct nlmsghdr *nlh; 5214 5215 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE) 5216 scope = RT_SCOPE_SITE; 5217 5218 nlh = nlmsg_put(skb, args->portid, args->seq, args->event, 5219 sizeof(struct ifaddrmsg), args->flags); 5220 if (!nlh) 5221 return -EMSGSIZE; 5222 5223 if (args->netnsid >= 0 && 5224 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) { 5225 nlmsg_cancel(skb, nlh); 5226 return -EMSGSIZE; 5227 } 5228 5229 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); 5230 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 || 5231 put_cacheinfo(skb, ifmca->mca_cstamp, READ_ONCE(ifmca->mca_tstamp), 5232 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) { 5233 nlmsg_cancel(skb, nlh); 5234 return -EMSGSIZE; 5235 } 5236 5237 nlmsg_end(skb, nlh); 5238 return 0; 5239 } 5240 5241 static int inet6_fill_ifacaddr(struct sk_buff *skb, 5242 const struct ifacaddr6 *ifaca, 5243 struct inet6_fill_args *args) 5244 { 5245 struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt); 5246 int ifindex = dev ? dev->ifindex : 1; 5247 u8 scope = RT_SCOPE_UNIVERSE; 5248 struct nlmsghdr *nlh; 5249 5250 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE) 5251 scope = RT_SCOPE_SITE; 5252 5253 nlh = nlmsg_put(skb, args->portid, args->seq, args->event, 5254 sizeof(struct ifaddrmsg), args->flags); 5255 if (!nlh) 5256 return -EMSGSIZE; 5257 5258 if (args->netnsid >= 0 && 5259 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) { 5260 nlmsg_cancel(skb, nlh); 5261 return -EMSGSIZE; 5262 } 5263 5264 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); 5265 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 || 5266 put_cacheinfo(skb, ifaca->aca_cstamp, READ_ONCE(ifaca->aca_tstamp), 5267 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) { 5268 nlmsg_cancel(skb, nlh); 5269 return -EMSGSIZE; 5270 } 5271 5272 nlmsg_end(skb, nlh); 5273 return 0; 5274 } 5275 5276 /* called with rcu_read_lock() */ 5277 static int in6_dump_addrs(const struct inet6_dev *idev, struct sk_buff *skb, 5278 struct netlink_callback *cb, int *s_ip_idx, 5279 struct inet6_fill_args *fillargs) 5280 { 5281 const struct ifmcaddr6 *ifmca; 5282 const struct ifacaddr6 *ifaca; 5283 int ip_idx = 0; 5284 int err = 0; 5285 5286 switch (fillargs->type) { 5287 case UNICAST_ADDR: { 5288 const struct inet6_ifaddr *ifa; 5289 fillargs->event = RTM_NEWADDR; 5290 5291 /* unicast address incl. 
temp addr */ 5292 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) { 5293 if (ip_idx < *s_ip_idx) 5294 goto next; 5295 err = inet6_fill_ifaddr(skb, ifa, fillargs); 5296 if (err < 0) 5297 break; 5298 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 5299 next: 5300 ip_idx++; 5301 } 5302 break; 5303 } 5304 case MULTICAST_ADDR: 5305 fillargs->event = RTM_GETMULTICAST; 5306 5307 /* multicast address */ 5308 for (ifmca = rcu_dereference(idev->mc_list); 5309 ifmca; 5310 ifmca = rcu_dereference(ifmca->next), ip_idx++) { 5311 if (ip_idx < *s_ip_idx) 5312 continue; 5313 err = inet6_fill_ifmcaddr(skb, ifmca, fillargs); 5314 if (err < 0) 5315 break; 5316 } 5317 break; 5318 case ANYCAST_ADDR: 5319 fillargs->event = RTM_GETANYCAST; 5320 /* anycast address */ 5321 for (ifaca = rcu_dereference(idev->ac_list); ifaca; 5322 ifaca = rcu_dereference(ifaca->aca_next), ip_idx++) { 5323 if (ip_idx < *s_ip_idx) 5324 continue; 5325 err = inet6_fill_ifacaddr(skb, ifaca, fillargs); 5326 if (err < 0) 5327 break; 5328 } 5329 break; 5330 default: 5331 break; 5332 } 5333 *s_ip_idx = err ? ip_idx : 0; 5334 return err; 5335 } 5336 5337 static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh, 5338 struct inet6_fill_args *fillargs, 5339 struct net **tgt_net, struct sock *sk, 5340 struct netlink_callback *cb) 5341 { 5342 struct netlink_ext_ack *extack = cb->extack; 5343 struct nlattr *tb[IFA_MAX+1]; 5344 struct ifaddrmsg *ifm; 5345 int err, i; 5346 5347 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 5348 NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request"); 5349 return -EINVAL; 5350 } 5351 5352 ifm = nlmsg_data(nlh); 5353 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) { 5354 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request"); 5355 return -EINVAL; 5356 } 5357 5358 fillargs->ifindex = ifm->ifa_index; 5359 if (fillargs->ifindex) { 5360 cb->answer_flags |= NLM_F_DUMP_FILTERED; 5361 fillargs->flags |= NLM_F_DUMP_FILTERED; 5362 } 5363 5364 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX, 5365 ifa_ipv6_policy, extack); 5366 if (err < 0) 5367 return err; 5368 5369 for (i = 0; i <= IFA_MAX; ++i) { 5370 if (!tb[i]) 5371 continue; 5372 5373 if (i == IFA_TARGET_NETNSID) { 5374 struct net *net; 5375 5376 fillargs->netnsid = nla_get_s32(tb[i]); 5377 net = rtnl_get_net_ns_capable(sk, fillargs->netnsid); 5378 if (IS_ERR(net)) { 5379 fillargs->netnsid = -1; 5380 NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id"); 5381 return PTR_ERR(net); 5382 } 5383 *tgt_net = net; 5384 } else { 5385 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request"); 5386 return -EINVAL; 5387 } 5388 } 5389 5390 return 0; 5391 } 5392 5393 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, 5394 enum addr_type_t type) 5395 { 5396 struct net *tgt_net = sock_net(skb->sk); 5397 const struct nlmsghdr *nlh = cb->nlh; 5398 struct inet6_fill_args fillargs = { 5399 .portid = NETLINK_CB(cb->skb).portid, 5400 .seq = cb->nlh->nlmsg_seq, 5401 .flags = NLM_F_MULTI, 5402 .netnsid = -1, 5403 .type = type, 5404 }; 5405 struct { 5406 unsigned long ifindex; 5407 int ip_idx; 5408 } *ctx = (void *)cb->ctx; 5409 struct net_device *dev; 5410 struct inet6_dev *idev; 5411 int err = 0; 5412 5413 rcu_read_lock(); 5414 if (cb->strict_check) { 5415 err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net, 5416 skb->sk, cb); 5417 if (err < 0) 5418 goto done; 5419 5420 err = 0; 5421 if (fillargs.ifindex) { 5422 dev = dev_get_by_index_rcu(tgt_net, 
fillargs.ifindex); 5423 if (!dev) { 5424 err = -ENODEV; 5425 goto done; 5426 } 5427 idev = __in6_dev_get(dev); 5428 if (idev) 5429 err = in6_dump_addrs(idev, skb, cb, 5430 &ctx->ip_idx, 5431 &fillargs); 5432 goto done; 5433 } 5434 } 5435 5436 cb->seq = inet6_base_seq(tgt_net); 5437 for_each_netdev_dump(tgt_net, dev, ctx->ifindex) { 5438 idev = __in6_dev_get(dev); 5439 if (!idev) 5440 continue; 5441 err = in6_dump_addrs(idev, skb, cb, &ctx->ip_idx, 5442 &fillargs); 5443 if (err < 0) 5444 goto done; 5445 } 5446 done: 5447 rcu_read_unlock(); 5448 if (fillargs.netnsid >= 0) 5449 put_net(tgt_net); 5450 5451 return err; 5452 } 5453 5454 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 5455 { 5456 enum addr_type_t type = UNICAST_ADDR; 5457 5458 return inet6_dump_addr(skb, cb, type); 5459 } 5460 5461 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb) 5462 { 5463 enum addr_type_t type = MULTICAST_ADDR; 5464 5465 return inet6_dump_addr(skb, cb, type); 5466 } 5467 5468 5469 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb) 5470 { 5471 enum addr_type_t type = ANYCAST_ADDR; 5472 5473 return inet6_dump_addr(skb, cb, type); 5474 } 5475 5476 static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb, 5477 const struct nlmsghdr *nlh, 5478 struct nlattr **tb, 5479 struct netlink_ext_ack *extack) 5480 { 5481 struct ifaddrmsg *ifm; 5482 int i, err; 5483 5484 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 5485 NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request"); 5486 return -EINVAL; 5487 } 5488 5489 if (!netlink_strict_get_check(skb)) 5490 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, 5491 ifa_ipv6_policy, extack); 5492 5493 ifm = nlmsg_data(nlh); 5494 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) { 5495 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request"); 5496 return -EINVAL; 5497 } 5498 5499 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX, 5500 ifa_ipv6_policy, extack); 5501 if (err) 5502 return err; 5503 5504 for (i = 0; i <= IFA_MAX; i++) { 5505 if (!tb[i]) 5506 continue; 5507 5508 switch (i) { 5509 case IFA_TARGET_NETNSID: 5510 case IFA_ADDRESS: 5511 case IFA_LOCAL: 5512 break; 5513 default: 5514 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request"); 5515 return -EINVAL; 5516 } 5517 } 5518 5519 return 0; 5520 } 5521 5522 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh, 5523 struct netlink_ext_ack *extack) 5524 { 5525 struct net *tgt_net = sock_net(in_skb->sk); 5526 struct inet6_fill_args fillargs = { 5527 .portid = NETLINK_CB(in_skb).portid, 5528 .seq = nlh->nlmsg_seq, 5529 .event = RTM_NEWADDR, 5530 .flags = 0, 5531 .netnsid = -1, 5532 }; 5533 struct ifaddrmsg *ifm; 5534 struct nlattr *tb[IFA_MAX+1]; 5535 struct in6_addr *addr = NULL, *peer; 5536 struct net_device *dev = NULL; 5537 struct inet6_ifaddr *ifa; 5538 struct sk_buff *skb; 5539 int err; 5540 5541 err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack); 5542 if (err < 0) 5543 return err; 5544 5545 if (tb[IFA_TARGET_NETNSID]) { 5546 fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]); 5547 5548 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk, 5549 fillargs.netnsid); 5550 if (IS_ERR(tgt_net)) 5551 return PTR_ERR(tgt_net); 5552 } 5553 5554 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer); 5555 if (!addr) { 5556 err = -EINVAL; 5557 goto errout; 5558 } 5559 ifm = nlmsg_data(nlh); 5560 if 
(ifm->ifa_index) 5561 dev = dev_get_by_index(tgt_net, ifm->ifa_index); 5562 5563 ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1); 5564 if (!ifa) { 5565 err = -EADDRNOTAVAIL; 5566 goto errout; 5567 } 5568 5569 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL); 5570 if (!skb) { 5571 err = -ENOBUFS; 5572 goto errout_ifa; 5573 } 5574 5575 err = inet6_fill_ifaddr(skb, ifa, &fillargs); 5576 if (err < 0) { 5577 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ 5578 WARN_ON(err == -EMSGSIZE); 5579 kfree_skb(skb); 5580 goto errout_ifa; 5581 } 5582 err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid); 5583 errout_ifa: 5584 in6_ifa_put(ifa); 5585 errout: 5586 dev_put(dev); 5587 if (fillargs.netnsid >= 0) 5588 put_net(tgt_net); 5589 5590 return err; 5591 } 5592 5593 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) 5594 { 5595 struct sk_buff *skb; 5596 struct net *net = dev_net(ifa->idev->dev); 5597 struct inet6_fill_args fillargs = { 5598 .portid = 0, 5599 .seq = 0, 5600 .event = event, 5601 .flags = 0, 5602 .netnsid = -1, 5603 }; 5604 int err = -ENOBUFS; 5605 5606 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); 5607 if (!skb) 5608 goto errout; 5609 5610 err = inet6_fill_ifaddr(skb, ifa, &fillargs); 5611 if (err < 0) { 5612 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ 5613 WARN_ON(err == -EMSGSIZE); 5614 kfree_skb(skb); 5615 goto errout; 5616 } 5617 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 5618 return; 5619 errout: 5620 if (err < 0) 5621 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err); 5622 } 5623 5624 static void ipv6_store_devconf(const struct ipv6_devconf *cnf, 5625 __s32 *array, int bytes) 5626 { 5627 BUG_ON(bytes < (DEVCONF_MAX * 4)); 5628 5629 memset(array, 0, bytes); 5630 array[DEVCONF_FORWARDING] = READ_ONCE(cnf->forwarding); 5631 array[DEVCONF_HOPLIMIT] = READ_ONCE(cnf->hop_limit); 5632 array[DEVCONF_MTU6] = READ_ONCE(cnf->mtu6); 5633 array[DEVCONF_ACCEPT_RA] = READ_ONCE(cnf->accept_ra); 5634 array[DEVCONF_ACCEPT_REDIRECTS] = READ_ONCE(cnf->accept_redirects); 5635 array[DEVCONF_AUTOCONF] = READ_ONCE(cnf->autoconf); 5636 array[DEVCONF_DAD_TRANSMITS] = READ_ONCE(cnf->dad_transmits); 5637 array[DEVCONF_RTR_SOLICITS] = READ_ONCE(cnf->rtr_solicits); 5638 array[DEVCONF_RTR_SOLICIT_INTERVAL] = 5639 jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_interval)); 5640 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] = 5641 jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_max_interval)); 5642 array[DEVCONF_RTR_SOLICIT_DELAY] = 5643 jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_delay)); 5644 array[DEVCONF_FORCE_MLD_VERSION] = READ_ONCE(cnf->force_mld_version); 5645 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] = 5646 jiffies_to_msecs(READ_ONCE(cnf->mldv1_unsolicited_report_interval)); 5647 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] = 5648 jiffies_to_msecs(READ_ONCE(cnf->mldv2_unsolicited_report_interval)); 5649 array[DEVCONF_USE_TEMPADDR] = READ_ONCE(cnf->use_tempaddr); 5650 array[DEVCONF_TEMP_VALID_LFT] = READ_ONCE(cnf->temp_valid_lft); 5651 array[DEVCONF_TEMP_PREFERED_LFT] = READ_ONCE(cnf->temp_prefered_lft); 5652 array[DEVCONF_REGEN_MAX_RETRY] = READ_ONCE(cnf->regen_max_retry); 5653 array[DEVCONF_MAX_DESYNC_FACTOR] = READ_ONCE(cnf->max_desync_factor); 5654 array[DEVCONF_MAX_ADDRESSES] = READ_ONCE(cnf->max_addresses); 5655 array[DEVCONF_ACCEPT_RA_DEFRTR] = READ_ONCE(cnf->accept_ra_defrtr); 5656 array[DEVCONF_RA_DEFRTR_METRIC] = READ_ONCE(cnf->ra_defrtr_metric); 5657 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = 5658 READ_ONCE(cnf->accept_ra_min_hop_limit); 
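	/* Each DEVCONF_* slot below mirrors the corresponding per-device (or
	 * per-namespace default) configuration value; READ_ONCE() annotates
	 * the lockless reads, since these knobs can change under us, e.g.
	 * via concurrent sysctl writes.
	 */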
5659 array[DEVCONF_ACCEPT_RA_PINFO] = READ_ONCE(cnf->accept_ra_pinfo); 5660 #ifdef CONFIG_IPV6_ROUTER_PREF 5661 array[DEVCONF_ACCEPT_RA_RTR_PREF] = READ_ONCE(cnf->accept_ra_rtr_pref); 5662 array[DEVCONF_RTR_PROBE_INTERVAL] = 5663 jiffies_to_msecs(READ_ONCE(cnf->rtr_probe_interval)); 5664 #ifdef CONFIG_IPV6_ROUTE_INFO 5665 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = 5666 READ_ONCE(cnf->accept_ra_rt_info_min_plen); 5667 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = 5668 READ_ONCE(cnf->accept_ra_rt_info_max_plen); 5669 #endif 5670 #endif 5671 array[DEVCONF_PROXY_NDP] = READ_ONCE(cnf->proxy_ndp); 5672 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = 5673 READ_ONCE(cnf->accept_source_route); 5674 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 5675 array[DEVCONF_OPTIMISTIC_DAD] = READ_ONCE(cnf->optimistic_dad); 5676 array[DEVCONF_USE_OPTIMISTIC] = READ_ONCE(cnf->use_optimistic); 5677 #endif 5678 #ifdef CONFIG_IPV6_MROUTE 5679 array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding); 5680 #endif 5681 array[DEVCONF_DISABLE_IPV6] = READ_ONCE(cnf->disable_ipv6); 5682 array[DEVCONF_ACCEPT_DAD] = READ_ONCE(cnf->accept_dad); 5683 array[DEVCONF_FORCE_TLLAO] = READ_ONCE(cnf->force_tllao); 5684 array[DEVCONF_NDISC_NOTIFY] = READ_ONCE(cnf->ndisc_notify); 5685 array[DEVCONF_SUPPRESS_FRAG_NDISC] = 5686 READ_ONCE(cnf->suppress_frag_ndisc); 5687 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = 5688 READ_ONCE(cnf->accept_ra_from_local); 5689 array[DEVCONF_ACCEPT_RA_MTU] = READ_ONCE(cnf->accept_ra_mtu); 5690 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = 5691 READ_ONCE(cnf->ignore_routes_with_linkdown); 5692 /* we omit DEVCONF_STABLE_SECRET for now */ 5693 array[DEVCONF_USE_OIF_ADDRS_ONLY] = READ_ONCE(cnf->use_oif_addrs_only); 5694 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = 5695 READ_ONCE(cnf->drop_unicast_in_l2_multicast); 5696 array[DEVCONF_DROP_UNSOLICITED_NA] = READ_ONCE(cnf->drop_unsolicited_na); 5697 array[DEVCONF_KEEP_ADDR_ON_DOWN] = READ_ONCE(cnf->keep_addr_on_down); 5698 array[DEVCONF_SEG6_ENABLED] = READ_ONCE(cnf->seg6_enabled); 5699 #ifdef CONFIG_IPV6_SEG6_HMAC 5700 array[DEVCONF_SEG6_REQUIRE_HMAC] = READ_ONCE(cnf->seg6_require_hmac); 5701 #endif 5702 array[DEVCONF_ENHANCED_DAD] = READ_ONCE(cnf->enhanced_dad); 5703 array[DEVCONF_ADDR_GEN_MODE] = READ_ONCE(cnf->addr_gen_mode); 5704 array[DEVCONF_DISABLE_POLICY] = READ_ONCE(cnf->disable_policy); 5705 array[DEVCONF_NDISC_TCLASS] = READ_ONCE(cnf->ndisc_tclass); 5706 array[DEVCONF_RPL_SEG_ENABLED] = READ_ONCE(cnf->rpl_seg_enabled); 5707 array[DEVCONF_IOAM6_ENABLED] = READ_ONCE(cnf->ioam6_enabled); 5708 array[DEVCONF_IOAM6_ID] = READ_ONCE(cnf->ioam6_id); 5709 array[DEVCONF_IOAM6_ID_WIDE] = READ_ONCE(cnf->ioam6_id_wide); 5710 array[DEVCONF_NDISC_EVICT_NOCARRIER] = 5711 READ_ONCE(cnf->ndisc_evict_nocarrier); 5712 array[DEVCONF_ACCEPT_UNTRACKED_NA] = 5713 READ_ONCE(cnf->accept_untracked_na); 5714 array[DEVCONF_ACCEPT_RA_MIN_LFT] = READ_ONCE(cnf->accept_ra_min_lft); 5715 } 5716 5717 static inline size_t inet6_ifla6_size(void) 5718 { 5719 return nla_total_size(4) /* IFLA_INET6_FLAGS */ 5720 + nla_total_size(sizeof(struct ifla_cacheinfo)) 5721 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */ 5722 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */ 5723 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */ 5724 + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */ 5725 + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */ 5726 + nla_total_size(4) /* IFLA_INET6_RA_MTU */ 5727 + 0; 5728 } 5729 5730 static inline size_t inet6_if_nlmsg_size(void) 5731 { 5732 
return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 5733 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 5734 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 5735 + nla_total_size(4) /* IFLA_MTU */ 5736 + nla_total_size(4) /* IFLA_LINK */ 5737 + nla_total_size(1) /* IFLA_OPERSTATE */ 5738 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */ 5739 } 5740 5741 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib, 5742 int bytes) 5743 { 5744 int i; 5745 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX; 5746 BUG_ON(pad < 0); 5747 5748 /* Use put_unaligned() because stats may not be aligned for u64. */ 5749 put_unaligned(ICMP6_MIB_MAX, &stats[0]); 5750 for (i = 1; i < ICMP6_MIB_MAX; i++) 5751 put_unaligned(atomic_long_read(&mib[i]), &stats[i]); 5752 5753 memset(&stats[ICMP6_MIB_MAX], 0, pad); 5754 } 5755 5756 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib, 5757 int bytes, size_t syncpoff) 5758 { 5759 int i, c; 5760 u64 buff[IPSTATS_MIB_MAX]; 5761 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX; 5762 5763 BUG_ON(pad < 0); 5764 5765 memset(buff, 0, sizeof(buff)); 5766 buff[0] = IPSTATS_MIB_MAX; 5767 5768 for_each_possible_cpu(c) { 5769 for (i = 1; i < IPSTATS_MIB_MAX; i++) 5770 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff); 5771 } 5772 5773 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64)); 5774 memset(&stats[IPSTATS_MIB_MAX], 0, pad); 5775 } 5776 5777 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, 5778 int bytes) 5779 { 5780 switch (attrtype) { 5781 case IFLA_INET6_STATS: 5782 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes, 5783 offsetof(struct ipstats_mib, syncp)); 5784 break; 5785 case IFLA_INET6_ICMP6STATS: 5786 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes); 5787 break; 5788 } 5789 } 5790 5791 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev, 5792 u32 ext_filter_mask) 5793 { 5794 struct ifla_cacheinfo ci; 5795 struct nlattr *nla; 5796 u32 ra_mtu; 5797 5798 if (nla_put_u32(skb, IFLA_INET6_FLAGS, READ_ONCE(idev->if_flags))) 5799 goto nla_put_failure; 5800 ci.max_reasm_len = IPV6_MAXPLEN; 5801 ci.tstamp = cstamp_delta(READ_ONCE(idev->tstamp)); 5802 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time); 5803 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME)); 5804 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci)) 5805 goto nla_put_failure; 5806 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); 5807 if (!nla) 5808 goto nla_put_failure; 5809 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla)); 5810 5811 /* XXX - MC not implemented */ 5812 5813 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS) 5814 return 0; 5815 5816 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); 5817 if (!nla) 5818 goto nla_put_failure; 5819 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); 5820 5821 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); 5822 if (!nla) 5823 goto nla_put_failure; 5824 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); 5825 5826 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr)); 5827 if (!nla) 5828 goto nla_put_failure; 5829 read_lock_bh(&idev->lock); 5830 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla)); 5831 read_unlock_bh(&idev->lock); 5832 5833 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, 5834 READ_ONCE(idev->cnf.addr_gen_mode))) 5835 goto nla_put_failure; 5836 5837 ra_mtu = 
READ_ONCE(idev->ra_mtu); 5838 if (ra_mtu && nla_put_u32(skb, IFLA_INET6_RA_MTU, ra_mtu)) 5839 goto nla_put_failure; 5840 5841 return 0; 5842 5843 nla_put_failure: 5844 return -EMSGSIZE; 5845 } 5846 5847 static size_t inet6_get_link_af_size(const struct net_device *dev, 5848 u32 ext_filter_mask) 5849 { 5850 if (!__in6_dev_get(dev)) 5851 return 0; 5852 5853 return inet6_ifla6_size(); 5854 } 5855 5856 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev, 5857 u32 ext_filter_mask) 5858 { 5859 struct inet6_dev *idev = __in6_dev_get(dev); 5860 5861 if (!idev) 5862 return -ENODATA; 5863 5864 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0) 5865 return -EMSGSIZE; 5866 5867 return 0; 5868 } 5869 5870 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token, 5871 struct netlink_ext_ack *extack) 5872 { 5873 struct inet6_ifaddr *ifp; 5874 struct net_device *dev = idev->dev; 5875 bool clear_token, update_rs = false; 5876 struct in6_addr ll_addr; 5877 5878 ASSERT_RTNL(); 5879 5880 if (!token) 5881 return -EINVAL; 5882 5883 if (dev->flags & IFF_LOOPBACK) { 5884 NL_SET_ERR_MSG_MOD(extack, "Device is loopback"); 5885 return -EINVAL; 5886 } 5887 5888 if (dev->flags & IFF_NOARP) { 5889 NL_SET_ERR_MSG_MOD(extack, 5890 "Device does not do neighbour discovery"); 5891 return -EINVAL; 5892 } 5893 5894 if (!ipv6_accept_ra(idev)) { 5895 NL_SET_ERR_MSG_MOD(extack, 5896 "Router advertisement is disabled on device"); 5897 return -EINVAL; 5898 } 5899 5900 if (READ_ONCE(idev->cnf.rtr_solicits) == 0) { 5901 NL_SET_ERR_MSG(extack, 5902 "Router solicitation is disabled on device"); 5903 return -EINVAL; 5904 } 5905 5906 write_lock_bh(&idev->lock); 5907 5908 BUILD_BUG_ON(sizeof(token->s6_addr) != 16); 5909 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8); 5910 5911 write_unlock_bh(&idev->lock); 5912 5913 clear_token = ipv6_addr_any(token); 5914 if (clear_token) 5915 goto update_lft; 5916 5917 if (!idev->dead && (idev->if_flags & IF_READY) && 5918 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE | 5919 IFA_F_OPTIMISTIC)) { 5920 /* If we're not ready, then normal ifup will take care 5921 * of this. Otherwise, we need to request our rs here. 5922 */ 5923 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters); 5924 update_rs = true; 5925 } 5926 5927 update_lft: 5928 write_lock_bh(&idev->lock); 5929 5930 if (update_rs) { 5931 idev->if_flags |= IF_RS_SENT; 5932 idev->rs_interval = rfc3315_s14_backoff_init( 5933 READ_ONCE(idev->cnf.rtr_solicit_interval)); 5934 idev->rs_probes = 1; 5935 addrconf_mod_rs_timer(idev, idev->rs_interval); 5936 } 5937 5938 /* Well, that's kinda nasty ... 
*/ 5939 list_for_each_entry(ifp, &idev->addr_list, if_list) { 5940 spin_lock(&ifp->lock); 5941 if (ifp->tokenized) { 5942 ifp->valid_lft = 0; 5943 ifp->prefered_lft = 0; 5944 } 5945 spin_unlock(&ifp->lock); 5946 } 5947 5948 write_unlock_bh(&idev->lock); 5949 inet6_ifinfo_notify(RTM_NEWLINK, idev); 5950 addrconf_verify_rtnl(dev_net(dev)); 5951 return 0; 5952 } 5953 5954 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = { 5955 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 }, 5956 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) }, 5957 [IFLA_INET6_RA_MTU] = { .type = NLA_REJECT, 5958 .reject_message = 5959 "IFLA_INET6_RA_MTU can not be set" }, 5960 }; 5961 5962 static int check_addr_gen_mode(int mode) 5963 { 5964 if (mode != IN6_ADDR_GEN_MODE_EUI64 && 5965 mode != IN6_ADDR_GEN_MODE_NONE && 5966 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY && 5967 mode != IN6_ADDR_GEN_MODE_RANDOM) 5968 return -EINVAL; 5969 return 1; 5970 } 5971 5972 static int check_stable_privacy(struct inet6_dev *idev, struct net *net, 5973 int mode) 5974 { 5975 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY && 5976 !idev->cnf.stable_secret.initialized && 5977 !net->ipv6.devconf_dflt->stable_secret.initialized) 5978 return -EINVAL; 5979 return 1; 5980 } 5981 5982 static int inet6_validate_link_af(const struct net_device *dev, 5983 const struct nlattr *nla, 5984 struct netlink_ext_ack *extack) 5985 { 5986 struct nlattr *tb[IFLA_INET6_MAX + 1]; 5987 struct inet6_dev *idev = NULL; 5988 int err; 5989 5990 if (dev) { 5991 idev = __in6_dev_get(dev); 5992 if (!idev) 5993 return -EAFNOSUPPORT; 5994 } 5995 5996 err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, 5997 inet6_af_policy, extack); 5998 if (err) 5999 return err; 6000 6001 if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE]) 6002 return -EINVAL; 6003 6004 if (tb[IFLA_INET6_ADDR_GEN_MODE]) { 6005 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]); 6006 6007 if (check_addr_gen_mode(mode) < 0) 6008 return -EINVAL; 6009 if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0) 6010 return -EINVAL; 6011 } 6012 6013 return 0; 6014 } 6015 6016 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla, 6017 struct netlink_ext_ack *extack) 6018 { 6019 struct inet6_dev *idev = __in6_dev_get(dev); 6020 struct nlattr *tb[IFLA_INET6_MAX + 1]; 6021 int err; 6022 6023 if (!idev) 6024 return -EAFNOSUPPORT; 6025 6026 if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0) 6027 return -EINVAL; 6028 6029 if (tb[IFLA_INET6_TOKEN]) { 6030 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]), 6031 extack); 6032 if (err) 6033 return err; 6034 } 6035 6036 if (tb[IFLA_INET6_ADDR_GEN_MODE]) { 6037 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]); 6038 6039 WRITE_ONCE(idev->cnf.addr_gen_mode, mode); 6040 } 6041 6042 return 0; 6043 } 6044 6045 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, 6046 u32 portid, u32 seq, int event, unsigned int flags) 6047 { 6048 struct net_device *dev = idev->dev; 6049 struct ifinfomsg *hdr; 6050 struct nlmsghdr *nlh; 6051 int ifindex, iflink; 6052 void *protoinfo; 6053 6054 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags); 6055 if (!nlh) 6056 return -EMSGSIZE; 6057 6058 hdr = nlmsg_data(nlh); 6059 hdr->ifi_family = AF_INET6; 6060 hdr->__ifi_pad = 0; 6061 hdr->ifi_type = dev->type; 6062 ifindex = READ_ONCE(dev->ifindex); 6063 hdr->ifi_index = ifindex; 6064 hdr->ifi_flags = dev_get_flags(dev); 6065 hdr->ifi_change = 0; 6066 6067 iflink = 
dev_get_iflink(dev); 6068 if (nla_put_string(skb, IFLA_IFNAME, dev->name) || 6069 (dev->addr_len && 6070 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 6071 nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) || 6072 (ifindex != iflink && 6073 nla_put_u32(skb, IFLA_LINK, iflink)) || 6074 nla_put_u8(skb, IFLA_OPERSTATE, 6075 netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN)) 6076 goto nla_put_failure; 6077 protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO); 6078 if (!protoinfo) 6079 goto nla_put_failure; 6080 6081 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0) 6082 goto nla_put_failure; 6083 6084 nla_nest_end(skb, protoinfo); 6085 nlmsg_end(skb, nlh); 6086 return 0; 6087 6088 nla_put_failure: 6089 nlmsg_cancel(skb, nlh); 6090 return -EMSGSIZE; 6091 } 6092 6093 static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh, 6094 struct netlink_ext_ack *extack) 6095 { 6096 struct ifinfomsg *ifm; 6097 6098 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 6099 NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request"); 6100 return -EINVAL; 6101 } 6102 6103 if (nlmsg_attrlen(nlh, sizeof(*ifm))) { 6104 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header"); 6105 return -EINVAL; 6106 } 6107 6108 ifm = nlmsg_data(nlh); 6109 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 6110 ifm->ifi_change || ifm->ifi_index) { 6111 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request"); 6112 return -EINVAL; 6113 } 6114 6115 return 0; 6116 } 6117 6118 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 6119 { 6120 struct net *net = sock_net(skb->sk); 6121 struct { 6122 unsigned long ifindex; 6123 } *ctx = (void *)cb->ctx; 6124 struct net_device *dev; 6125 struct inet6_dev *idev; 6126 int err; 6127 6128 /* only requests using strict checking can pass data to 6129 * influence the dump 6130 */ 6131 if (cb->strict_check) { 6132 err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack); 6133 6134 if (err < 0) 6135 return err; 6136 } 6137 6138 err = 0; 6139 rcu_read_lock(); 6140 for_each_netdev_dump(net, dev, ctx->ifindex) { 6141 idev = __in6_dev_get(dev); 6142 if (!idev) 6143 continue; 6144 err = inet6_fill_ifinfo(skb, idev, 6145 NETLINK_CB(cb->skb).portid, 6146 cb->nlh->nlmsg_seq, 6147 RTM_NEWLINK, NLM_F_MULTI); 6148 if (err < 0) 6149 break; 6150 } 6151 rcu_read_unlock(); 6152 6153 return err; 6154 } 6155 6156 void inet6_ifinfo_notify(int event, struct inet6_dev *idev) 6157 { 6158 struct sk_buff *skb; 6159 struct net *net = dev_net(idev->dev); 6160 int err = -ENOBUFS; 6161 6162 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC); 6163 if (!skb) 6164 goto errout; 6165 6166 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0); 6167 if (err < 0) { 6168 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */ 6169 WARN_ON(err == -EMSGSIZE); 6170 kfree_skb(skb); 6171 goto errout; 6172 } 6173 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC); 6174 return; 6175 errout: 6176 if (err < 0) 6177 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err); 6178 } 6179 6180 static inline size_t inet6_prefix_nlmsg_size(void) 6181 { 6182 return NLMSG_ALIGN(sizeof(struct prefixmsg)) 6183 + nla_total_size(sizeof(struct in6_addr)) 6184 + nla_total_size(sizeof(struct prefix_cacheinfo)); 6185 } 6186 6187 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev, 6188 struct prefix_info *pinfo, u32 portid, u32 seq, 6189 int event, unsigned int flags) 6190 { 6191 struct prefixmsg *pmsg; 6192 struct nlmsghdr *nlh; 6193 struct prefix_cacheinfo ci; 
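	/* Build a prefixmsg netlink notification: the prefixmsg header is
	 * filled from the device and the router-advertisement prefix_info
	 * option, followed by PREFIX_ADDRESS and PREFIX_CACHEINFO attributes.
	 */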
6194 6195 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags); 6196 if (!nlh) 6197 return -EMSGSIZE; 6198 6199 pmsg = nlmsg_data(nlh); 6200 pmsg->prefix_family = AF_INET6; 6201 pmsg->prefix_pad1 = 0; 6202 pmsg->prefix_pad2 = 0; 6203 pmsg->prefix_ifindex = idev->dev->ifindex; 6204 pmsg->prefix_len = pinfo->prefix_len; 6205 pmsg->prefix_type = pinfo->type; 6206 pmsg->prefix_pad3 = 0; 6207 pmsg->prefix_flags = pinfo->flags; 6208 6209 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix)) 6210 goto nla_put_failure; 6211 ci.preferred_time = ntohl(pinfo->prefered); 6212 ci.valid_time = ntohl(pinfo->valid); 6213 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci)) 6214 goto nla_put_failure; 6215 nlmsg_end(skb, nlh); 6216 return 0; 6217 6218 nla_put_failure: 6219 nlmsg_cancel(skb, nlh); 6220 return -EMSGSIZE; 6221 } 6222 6223 static void inet6_prefix_notify(int event, struct inet6_dev *idev, 6224 struct prefix_info *pinfo) 6225 { 6226 struct sk_buff *skb; 6227 struct net *net = dev_net(idev->dev); 6228 int err = -ENOBUFS; 6229 6230 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC); 6231 if (!skb) 6232 goto errout; 6233 6234 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0); 6235 if (err < 0) { 6236 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */ 6237 WARN_ON(err == -EMSGSIZE); 6238 kfree_skb(skb); 6239 goto errout; 6240 } 6241 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC); 6242 return; 6243 errout: 6244 if (err < 0) 6245 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err); 6246 } 6247 6248 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) 6249 { 6250 struct net *net = dev_net(ifp->idev->dev); 6251 6252 if (event) 6253 ASSERT_RTNL(); 6254 6255 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp); 6256 6257 switch (event) { 6258 case RTM_NEWADDR: 6259 /* 6260 * If the address was optimistic we inserted the route at the 6261 * start of our DAD process, so we don't need to do it again. 6262 * If the device was taken down in the middle of the DAD 6263 * cycle there is a race where we could get here without a 6264 * host route, so nothing to insert. That will be fixed when 6265 * the device is brought up. 
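	 * Below, a NULL rt->fib6_node means the host route exists but has
	 * not been linked into the FIB yet, so it is inserted now; the
	 * pr_warn() only triggers when no host route exists at all while
	 * the device is up.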
6266 */ 6267 if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) { 6268 ip6_ins_rt(net, ifp->rt); 6269 } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) { 6270 pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n", 6271 &ifp->addr, ifp->idev->dev->name); 6272 } 6273 6274 if (ifp->idev->cnf.forwarding) 6275 addrconf_join_anycast(ifp); 6276 if (!ipv6_addr_any(&ifp->peer_addr)) 6277 addrconf_prefix_route(&ifp->peer_addr, 128, 6278 ifp->rt_priority, ifp->idev->dev, 6279 0, 0, GFP_ATOMIC); 6280 break; 6281 case RTM_DELADDR: 6282 if (ifp->idev->cnf.forwarding) 6283 addrconf_leave_anycast(ifp); 6284 addrconf_leave_solict(ifp->idev, &ifp->addr); 6285 if (!ipv6_addr_any(&ifp->peer_addr)) { 6286 struct fib6_info *rt; 6287 6288 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128, 6289 ifp->idev->dev, 0, 0, 6290 false); 6291 if (rt) 6292 ip6_del_rt(net, rt, false); 6293 } 6294 if (ifp->rt) { 6295 ip6_del_rt(net, ifp->rt, false); 6296 ifp->rt = NULL; 6297 } 6298 rt_genid_bump_ipv6(net); 6299 break; 6300 } 6301 atomic_inc(&net->ipv6.dev_addr_genid); 6302 } 6303 6304 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) 6305 { 6306 if (likely(ifp->idev->dead == 0)) 6307 __ipv6_ifa_notify(event, ifp); 6308 } 6309 6310 #ifdef CONFIG_SYSCTL 6311 6312 static int addrconf_sysctl_forward(const struct ctl_table *ctl, int write, 6313 void *buffer, size_t *lenp, loff_t *ppos) 6314 { 6315 int *valp = ctl->data; 6316 int val = *valp; 6317 loff_t pos = *ppos; 6318 struct ctl_table lctl; 6319 int ret; 6320 6321 /* 6322 * ctl->data points to idev->cnf.forwarding, we should 6323 * not modify it until we get the rtnl lock. 6324 */ 6325 lctl = *ctl; 6326 lctl.data = &val; 6327 6328 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); 6329 6330 if (write) 6331 ret = addrconf_fixup_forwarding(ctl, valp, val); 6332 if (ret) 6333 *ppos = pos; 6334 return ret; 6335 } 6336 6337 static int addrconf_sysctl_mtu(const struct ctl_table *ctl, int write, 6338 void *buffer, size_t *lenp, loff_t *ppos) 6339 { 6340 struct inet6_dev *idev = ctl->extra1; 6341 int min_mtu = IPV6_MIN_MTU; 6342 struct ctl_table lctl; 6343 6344 lctl = *ctl; 6345 lctl.extra1 = &min_mtu; 6346 lctl.extra2 = idev ? 
&idev->dev->mtu : NULL; 6347 6348 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos); 6349 } 6350 6351 static void dev_disable_change(struct inet6_dev *idev) 6352 { 6353 struct netdev_notifier_info info; 6354 6355 if (!idev || !idev->dev) 6356 return; 6357 6358 netdev_notifier_info_init(&info, idev->dev); 6359 if (idev->cnf.disable_ipv6) 6360 addrconf_notify(NULL, NETDEV_DOWN, &info); 6361 else 6362 addrconf_notify(NULL, NETDEV_UP, &info); 6363 } 6364 6365 static void addrconf_disable_change(struct net *net, __s32 newf) 6366 { 6367 struct net_device *dev; 6368 struct inet6_dev *idev; 6369 6370 for_each_netdev(net, dev) { 6371 idev = __in6_dev_get(dev); 6372 if (idev) { 6373 int changed = (!idev->cnf.disable_ipv6) ^ (!newf); 6374 6375 WRITE_ONCE(idev->cnf.disable_ipv6, newf); 6376 if (changed) 6377 dev_disable_change(idev); 6378 } 6379 } 6380 } 6381 6382 static int addrconf_disable_ipv6(const struct ctl_table *table, int *p, int newf) 6383 { 6384 struct net *net = (struct net *)table->extra2; 6385 int old; 6386 6387 if (p == &net->ipv6.devconf_dflt->disable_ipv6) { 6388 WRITE_ONCE(*p, newf); 6389 return 0; 6390 } 6391 6392 if (!rtnl_trylock()) 6393 return restart_syscall(); 6394 6395 old = *p; 6396 WRITE_ONCE(*p, newf); 6397 6398 if (p == &net->ipv6.devconf_all->disable_ipv6) { 6399 WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf); 6400 addrconf_disable_change(net, newf); 6401 } else if ((!newf) ^ (!old)) 6402 dev_disable_change((struct inet6_dev *)table->extra1); 6403 6404 rtnl_unlock(); 6405 return 0; 6406 } 6407 6408 static int addrconf_sysctl_disable(const struct ctl_table *ctl, int write, 6409 void *buffer, size_t *lenp, loff_t *ppos) 6410 { 6411 int *valp = ctl->data; 6412 int val = *valp; 6413 loff_t pos = *ppos; 6414 struct ctl_table lctl; 6415 int ret; 6416 6417 /* 6418 * ctl->data points to idev->cnf.disable_ipv6, we should 6419 * not modify it until we get the rtnl lock. 
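	 * The handler therefore runs proc_dointvec() on a local copy and
	 * only commits the new value via addrconf_disable_ipv6(), which
	 * takes the rtnl lock (or restarts the syscall if it cannot).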
6420 */ 6421 lctl = *ctl; 6422 lctl.data = &val; 6423 6424 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); 6425 6426 if (write) 6427 ret = addrconf_disable_ipv6(ctl, valp, val); 6428 if (ret) 6429 *ppos = pos; 6430 return ret; 6431 } 6432 6433 static int addrconf_sysctl_proxy_ndp(const struct ctl_table *ctl, int write, 6434 void *buffer, size_t *lenp, loff_t *ppos) 6435 { 6436 int *valp = ctl->data; 6437 int ret; 6438 int old, new; 6439 6440 old = *valp; 6441 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 6442 new = *valp; 6443 6444 if (write && old != new) { 6445 struct net *net = ctl->extra2; 6446 6447 if (!rtnl_trylock()) 6448 return restart_syscall(); 6449 6450 if (valp == &net->ipv6.devconf_dflt->proxy_ndp) 6451 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, 6452 NETCONFA_PROXY_NEIGH, 6453 NETCONFA_IFINDEX_DEFAULT, 6454 net->ipv6.devconf_dflt); 6455 else if (valp == &net->ipv6.devconf_all->proxy_ndp) 6456 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, 6457 NETCONFA_PROXY_NEIGH, 6458 NETCONFA_IFINDEX_ALL, 6459 net->ipv6.devconf_all); 6460 else { 6461 struct inet6_dev *idev = ctl->extra1; 6462 6463 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, 6464 NETCONFA_PROXY_NEIGH, 6465 idev->dev->ifindex, 6466 &idev->cnf); 6467 } 6468 rtnl_unlock(); 6469 } 6470 6471 return ret; 6472 } 6473 6474 static int addrconf_sysctl_addr_gen_mode(const struct ctl_table *ctl, int write, 6475 void *buffer, size_t *lenp, 6476 loff_t *ppos) 6477 { 6478 int ret = 0; 6479 u32 new_val; 6480 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1; 6481 struct net *net = (struct net *)ctl->extra2; 6482 struct ctl_table tmp = { 6483 .data = &new_val, 6484 .maxlen = sizeof(new_val), 6485 .mode = ctl->mode, 6486 }; 6487 6488 if (!rtnl_trylock()) 6489 return restart_syscall(); 6490 6491 new_val = *((u32 *)ctl->data); 6492 6493 ret = proc_douintvec(&tmp, write, buffer, lenp, ppos); 6494 if (ret != 0) 6495 goto out; 6496 6497 if (write) { 6498 if (check_addr_gen_mode(new_val) < 0) { 6499 ret = -EINVAL; 6500 goto out; 6501 } 6502 6503 if (idev) { 6504 if (check_stable_privacy(idev, net, new_val) < 0) { 6505 ret = -EINVAL; 6506 goto out; 6507 } 6508 6509 if (idev->cnf.addr_gen_mode != new_val) { 6510 WRITE_ONCE(idev->cnf.addr_gen_mode, new_val); 6511 addrconf_init_auto_addrs(idev->dev); 6512 } 6513 } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) { 6514 struct net_device *dev; 6515 6516 WRITE_ONCE(net->ipv6.devconf_dflt->addr_gen_mode, new_val); 6517 for_each_netdev(net, dev) { 6518 idev = __in6_dev_get(dev); 6519 if (idev && 6520 idev->cnf.addr_gen_mode != new_val) { 6521 WRITE_ONCE(idev->cnf.addr_gen_mode, 6522 new_val); 6523 addrconf_init_auto_addrs(idev->dev); 6524 } 6525 } 6526 } 6527 6528 WRITE_ONCE(*((u32 *)ctl->data), new_val); 6529 } 6530 6531 out: 6532 rtnl_unlock(); 6533 6534 return ret; 6535 } 6536 6537 static int addrconf_sysctl_stable_secret(const struct ctl_table *ctl, int write, 6538 void *buffer, size_t *lenp, 6539 loff_t *ppos) 6540 { 6541 int err; 6542 struct in6_addr addr; 6543 char str[IPV6_MAX_STRLEN]; 6544 struct ctl_table lctl = *ctl; 6545 struct net *net = ctl->extra2; 6546 struct ipv6_stable_secret *secret = ctl->data; 6547 6548 if (&net->ipv6.devconf_all->stable_secret == ctl->data) 6549 return -EIO; 6550 6551 lctl.maxlen = IPV6_MAX_STRLEN; 6552 lctl.data = str; 6553 6554 if (!rtnl_trylock()) 6555 return restart_syscall(); 6556 6557 if (!write && !secret->initialized) { 6558 err = -EIO; 6559 goto out; 6560 } 6561 6562 err = snprintf(str, sizeof(str), "%pI6", 
&secret->secret); 6563 if (err >= sizeof(str)) { 6564 err = -EIO; 6565 goto out; 6566 } 6567 6568 err = proc_dostring(&lctl, write, buffer, lenp, ppos); 6569 if (err || !write) 6570 goto out; 6571 6572 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) { 6573 err = -EIO; 6574 goto out; 6575 } 6576 6577 secret->initialized = true; 6578 secret->secret = addr; 6579 6580 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) { 6581 struct net_device *dev; 6582 6583 for_each_netdev(net, dev) { 6584 struct inet6_dev *idev = __in6_dev_get(dev); 6585 6586 if (idev) { 6587 WRITE_ONCE(idev->cnf.addr_gen_mode, 6588 IN6_ADDR_GEN_MODE_STABLE_PRIVACY); 6589 } 6590 } 6591 } else { 6592 struct inet6_dev *idev = ctl->extra1; 6593 6594 WRITE_ONCE(idev->cnf.addr_gen_mode, 6595 IN6_ADDR_GEN_MODE_STABLE_PRIVACY); 6596 } 6597 6598 out: 6599 rtnl_unlock(); 6600 6601 return err; 6602 } 6603 6604 static 6605 int addrconf_sysctl_ignore_routes_with_linkdown(const struct ctl_table *ctl, 6606 int write, void *buffer, 6607 size_t *lenp, 6608 loff_t *ppos) 6609 { 6610 int *valp = ctl->data; 6611 int val = *valp; 6612 loff_t pos = *ppos; 6613 struct ctl_table lctl; 6614 int ret; 6615 6616 /* ctl->data points to idev->cnf.ignore_routes_when_linkdown 6617 * we should not modify it until we get the rtnl lock. 6618 */ 6619 lctl = *ctl; 6620 lctl.data = &val; 6621 6622 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); 6623 6624 if (write) 6625 ret = addrconf_fixup_linkdown(ctl, valp, val); 6626 if (ret) 6627 *ppos = pos; 6628 return ret; 6629 } 6630 6631 static 6632 void addrconf_set_nopolicy(struct rt6_info *rt, int action) 6633 { 6634 if (rt) { 6635 if (action) 6636 rt->dst.flags |= DST_NOPOLICY; 6637 else 6638 rt->dst.flags &= ~DST_NOPOLICY; 6639 } 6640 } 6641 6642 static 6643 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val) 6644 { 6645 struct inet6_ifaddr *ifa; 6646 6647 read_lock_bh(&idev->lock); 6648 list_for_each_entry(ifa, &idev->addr_list, if_list) { 6649 spin_lock(&ifa->lock); 6650 if (ifa->rt) { 6651 /* host routes only use builtin fib6_nh */ 6652 struct fib6_nh *nh = ifa->rt->fib6_nh; 6653 int cpu; 6654 6655 rcu_read_lock(); 6656 ifa->rt->dst_nopolicy = val ? 
true : false; 6657 if (nh->rt6i_pcpu) { 6658 for_each_possible_cpu(cpu) { 6659 struct rt6_info **rtp; 6660 6661 rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu); 6662 addrconf_set_nopolicy(*rtp, val); 6663 } 6664 } 6665 rcu_read_unlock(); 6666 } 6667 spin_unlock(&ifa->lock); 6668 } 6669 read_unlock_bh(&idev->lock); 6670 } 6671 6672 static 6673 int addrconf_disable_policy(const struct ctl_table *ctl, int *valp, int val) 6674 { 6675 struct net *net = (struct net *)ctl->extra2; 6676 struct inet6_dev *idev; 6677 6678 if (valp == &net->ipv6.devconf_dflt->disable_policy) { 6679 WRITE_ONCE(*valp, val); 6680 return 0; 6681 } 6682 6683 if (!rtnl_trylock()) 6684 return restart_syscall(); 6685 6686 WRITE_ONCE(*valp, val); 6687 6688 if (valp == &net->ipv6.devconf_all->disable_policy) { 6689 struct net_device *dev; 6690 6691 for_each_netdev(net, dev) { 6692 idev = __in6_dev_get(dev); 6693 if (idev) 6694 addrconf_disable_policy_idev(idev, val); 6695 } 6696 } else { 6697 idev = (struct inet6_dev *)ctl->extra1; 6698 addrconf_disable_policy_idev(idev, val); 6699 } 6700 6701 rtnl_unlock(); 6702 return 0; 6703 } 6704 6705 static int addrconf_sysctl_disable_policy(const struct ctl_table *ctl, int write, 6706 void *buffer, size_t *lenp, loff_t *ppos) 6707 { 6708 int *valp = ctl->data; 6709 int val = *valp; 6710 loff_t pos = *ppos; 6711 struct ctl_table lctl; 6712 int ret; 6713 6714 lctl = *ctl; 6715 lctl.data = &val; 6716 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); 6717 6718 if (write && (*valp != val)) 6719 ret = addrconf_disable_policy(ctl, valp, val); 6720 6721 if (ret) 6722 *ppos = pos; 6723 6724 return ret; 6725 } 6726 6727 static int minus_one = -1; 6728 static const int two_five_five = 255; 6729 static u32 ioam6_if_id_max = U16_MAX; 6730 6731 static const struct ctl_table addrconf_sysctl[] = { 6732 { 6733 .procname = "forwarding", 6734 .data = &ipv6_devconf.forwarding, 6735 .maxlen = sizeof(int), 6736 .mode = 0644, 6737 .proc_handler = addrconf_sysctl_forward, 6738 }, 6739 { 6740 .procname = "hop_limit", 6741 .data = &ipv6_devconf.hop_limit, 6742 .maxlen = sizeof(int), 6743 .mode = 0644, 6744 .proc_handler = proc_dointvec_minmax, 6745 .extra1 = (void *)SYSCTL_ONE, 6746 .extra2 = (void *)&two_five_five, 6747 }, 6748 { 6749 .procname = "mtu", 6750 .data = &ipv6_devconf.mtu6, 6751 .maxlen = sizeof(int), 6752 .mode = 0644, 6753 .proc_handler = addrconf_sysctl_mtu, 6754 }, 6755 { 6756 .procname = "accept_ra", 6757 .data = &ipv6_devconf.accept_ra, 6758 .maxlen = sizeof(int), 6759 .mode = 0644, 6760 .proc_handler = proc_dointvec, 6761 }, 6762 { 6763 .procname = "accept_redirects", 6764 .data = &ipv6_devconf.accept_redirects, 6765 .maxlen = sizeof(int), 6766 .mode = 0644, 6767 .proc_handler = proc_dointvec, 6768 }, 6769 { 6770 .procname = "autoconf", 6771 .data = &ipv6_devconf.autoconf, 6772 .maxlen = sizeof(int), 6773 .mode = 0644, 6774 .proc_handler = proc_dointvec, 6775 }, 6776 { 6777 .procname = "dad_transmits", 6778 .data = &ipv6_devconf.dad_transmits, 6779 .maxlen = sizeof(int), 6780 .mode = 0644, 6781 .proc_handler = proc_dointvec, 6782 }, 6783 { 6784 .procname = "router_solicitations", 6785 .data = &ipv6_devconf.rtr_solicits, 6786 .maxlen = sizeof(int), 6787 .mode = 0644, 6788 .proc_handler = proc_dointvec_minmax, 6789 .extra1 = &minus_one, 6790 }, 6791 { 6792 .procname = "router_solicitation_interval", 6793 .data = &ipv6_devconf.rtr_solicit_interval, 6794 .maxlen = sizeof(int), 6795 .mode = 0644, 6796 .proc_handler = proc_dointvec_jiffies, 6797 }, 6798 { 6799 .procname = 
"router_solicitation_max_interval", 6800 .data = &ipv6_devconf.rtr_solicit_max_interval, 6801 .maxlen = sizeof(int), 6802 .mode = 0644, 6803 .proc_handler = proc_dointvec_jiffies, 6804 }, 6805 { 6806 .procname = "router_solicitation_delay", 6807 .data = &ipv6_devconf.rtr_solicit_delay, 6808 .maxlen = sizeof(int), 6809 .mode = 0644, 6810 .proc_handler = proc_dointvec_jiffies, 6811 }, 6812 { 6813 .procname = "force_mld_version", 6814 .data = &ipv6_devconf.force_mld_version, 6815 .maxlen = sizeof(int), 6816 .mode = 0644, 6817 .proc_handler = proc_dointvec, 6818 }, 6819 { 6820 .procname = "mldv1_unsolicited_report_interval", 6821 .data = 6822 &ipv6_devconf.mldv1_unsolicited_report_interval, 6823 .maxlen = sizeof(int), 6824 .mode = 0644, 6825 .proc_handler = proc_dointvec_ms_jiffies, 6826 }, 6827 { 6828 .procname = "mldv2_unsolicited_report_interval", 6829 .data = 6830 &ipv6_devconf.mldv2_unsolicited_report_interval, 6831 .maxlen = sizeof(int), 6832 .mode = 0644, 6833 .proc_handler = proc_dointvec_ms_jiffies, 6834 }, 6835 { 6836 .procname = "use_tempaddr", 6837 .data = &ipv6_devconf.use_tempaddr, 6838 .maxlen = sizeof(int), 6839 .mode = 0644, 6840 .proc_handler = proc_dointvec, 6841 }, 6842 { 6843 .procname = "temp_valid_lft", 6844 .data = &ipv6_devconf.temp_valid_lft, 6845 .maxlen = sizeof(int), 6846 .mode = 0644, 6847 .proc_handler = proc_dointvec, 6848 }, 6849 { 6850 .procname = "temp_prefered_lft", 6851 .data = &ipv6_devconf.temp_prefered_lft, 6852 .maxlen = sizeof(int), 6853 .mode = 0644, 6854 .proc_handler = proc_dointvec, 6855 }, 6856 { 6857 .procname = "regen_min_advance", 6858 .data = &ipv6_devconf.regen_min_advance, 6859 .maxlen = sizeof(int), 6860 .mode = 0644, 6861 .proc_handler = proc_dointvec, 6862 }, 6863 { 6864 .procname = "regen_max_retry", 6865 .data = &ipv6_devconf.regen_max_retry, 6866 .maxlen = sizeof(int), 6867 .mode = 0644, 6868 .proc_handler = proc_dointvec, 6869 }, 6870 { 6871 .procname = "max_desync_factor", 6872 .data = &ipv6_devconf.max_desync_factor, 6873 .maxlen = sizeof(int), 6874 .mode = 0644, 6875 .proc_handler = proc_dointvec, 6876 }, 6877 { 6878 .procname = "max_addresses", 6879 .data = &ipv6_devconf.max_addresses, 6880 .maxlen = sizeof(int), 6881 .mode = 0644, 6882 .proc_handler = proc_dointvec, 6883 }, 6884 { 6885 .procname = "accept_ra_defrtr", 6886 .data = &ipv6_devconf.accept_ra_defrtr, 6887 .maxlen = sizeof(int), 6888 .mode = 0644, 6889 .proc_handler = proc_dointvec, 6890 }, 6891 { 6892 .procname = "ra_defrtr_metric", 6893 .data = &ipv6_devconf.ra_defrtr_metric, 6894 .maxlen = sizeof(u32), 6895 .mode = 0644, 6896 .proc_handler = proc_douintvec_minmax, 6897 .extra1 = (void *)SYSCTL_ONE, 6898 }, 6899 { 6900 .procname = "accept_ra_min_hop_limit", 6901 .data = &ipv6_devconf.accept_ra_min_hop_limit, 6902 .maxlen = sizeof(int), 6903 .mode = 0644, 6904 .proc_handler = proc_dointvec, 6905 }, 6906 { 6907 .procname = "accept_ra_min_lft", 6908 .data = &ipv6_devconf.accept_ra_min_lft, 6909 .maxlen = sizeof(int), 6910 .mode = 0644, 6911 .proc_handler = proc_dointvec, 6912 }, 6913 { 6914 .procname = "accept_ra_pinfo", 6915 .data = &ipv6_devconf.accept_ra_pinfo, 6916 .maxlen = sizeof(int), 6917 .mode = 0644, 6918 .proc_handler = proc_dointvec, 6919 }, 6920 { 6921 .procname = "ra_honor_pio_life", 6922 .data = &ipv6_devconf.ra_honor_pio_life, 6923 .maxlen = sizeof(u8), 6924 .mode = 0644, 6925 .proc_handler = proc_dou8vec_minmax, 6926 .extra1 = SYSCTL_ZERO, 6927 .extra2 = SYSCTL_ONE, 6928 }, 6929 #ifdef CONFIG_IPV6_ROUTER_PREF 6930 { 6931 .procname = "accept_ra_rtr_pref", 
6932 .data = &ipv6_devconf.accept_ra_rtr_pref, 6933 .maxlen = sizeof(int), 6934 .mode = 0644, 6935 .proc_handler = proc_dointvec, 6936 }, 6937 { 6938 .procname = "router_probe_interval", 6939 .data = &ipv6_devconf.rtr_probe_interval, 6940 .maxlen = sizeof(int), 6941 .mode = 0644, 6942 .proc_handler = proc_dointvec_jiffies, 6943 }, 6944 #ifdef CONFIG_IPV6_ROUTE_INFO 6945 { 6946 .procname = "accept_ra_rt_info_min_plen", 6947 .data = &ipv6_devconf.accept_ra_rt_info_min_plen, 6948 .maxlen = sizeof(int), 6949 .mode = 0644, 6950 .proc_handler = proc_dointvec, 6951 }, 6952 { 6953 .procname = "accept_ra_rt_info_max_plen", 6954 .data = &ipv6_devconf.accept_ra_rt_info_max_plen, 6955 .maxlen = sizeof(int), 6956 .mode = 0644, 6957 .proc_handler = proc_dointvec, 6958 }, 6959 #endif 6960 #endif 6961 { 6962 .procname = "proxy_ndp", 6963 .data = &ipv6_devconf.proxy_ndp, 6964 .maxlen = sizeof(int), 6965 .mode = 0644, 6966 .proc_handler = addrconf_sysctl_proxy_ndp, 6967 }, 6968 { 6969 .procname = "accept_source_route", 6970 .data = &ipv6_devconf.accept_source_route, 6971 .maxlen = sizeof(int), 6972 .mode = 0644, 6973 .proc_handler = proc_dointvec, 6974 }, 6975 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 6976 { 6977 .procname = "optimistic_dad", 6978 .data = &ipv6_devconf.optimistic_dad, 6979 .maxlen = sizeof(int), 6980 .mode = 0644, 6981 .proc_handler = proc_dointvec, 6982 }, 6983 { 6984 .procname = "use_optimistic", 6985 .data = &ipv6_devconf.use_optimistic, 6986 .maxlen = sizeof(int), 6987 .mode = 0644, 6988 .proc_handler = proc_dointvec, 6989 }, 6990 #endif 6991 #ifdef CONFIG_IPV6_MROUTE 6992 { 6993 .procname = "mc_forwarding", 6994 .data = &ipv6_devconf.mc_forwarding, 6995 .maxlen = sizeof(int), 6996 .mode = 0444, 6997 .proc_handler = proc_dointvec, 6998 }, 6999 #endif 7000 { 7001 .procname = "disable_ipv6", 7002 .data = &ipv6_devconf.disable_ipv6, 7003 .maxlen = sizeof(int), 7004 .mode = 0644, 7005 .proc_handler = addrconf_sysctl_disable, 7006 }, 7007 { 7008 .procname = "accept_dad", 7009 .data = &ipv6_devconf.accept_dad, 7010 .maxlen = sizeof(int), 7011 .mode = 0644, 7012 .proc_handler = proc_dointvec, 7013 }, 7014 { 7015 .procname = "force_tllao", 7016 .data = &ipv6_devconf.force_tllao, 7017 .maxlen = sizeof(int), 7018 .mode = 0644, 7019 .proc_handler = proc_dointvec 7020 }, 7021 { 7022 .procname = "ndisc_notify", 7023 .data = &ipv6_devconf.ndisc_notify, 7024 .maxlen = sizeof(int), 7025 .mode = 0644, 7026 .proc_handler = proc_dointvec 7027 }, 7028 { 7029 .procname = "suppress_frag_ndisc", 7030 .data = &ipv6_devconf.suppress_frag_ndisc, 7031 .maxlen = sizeof(int), 7032 .mode = 0644, 7033 .proc_handler = proc_dointvec 7034 }, 7035 { 7036 .procname = "accept_ra_from_local", 7037 .data = &ipv6_devconf.accept_ra_from_local, 7038 .maxlen = sizeof(int), 7039 .mode = 0644, 7040 .proc_handler = proc_dointvec, 7041 }, 7042 { 7043 .procname = "accept_ra_mtu", 7044 .data = &ipv6_devconf.accept_ra_mtu, 7045 .maxlen = sizeof(int), 7046 .mode = 0644, 7047 .proc_handler = proc_dointvec, 7048 }, 7049 { 7050 .procname = "stable_secret", 7051 .data = &ipv6_devconf.stable_secret, 7052 .maxlen = IPV6_MAX_STRLEN, 7053 .mode = 0600, 7054 .proc_handler = addrconf_sysctl_stable_secret, 7055 }, 7056 { 7057 .procname = "use_oif_addrs_only", 7058 .data = &ipv6_devconf.use_oif_addrs_only, 7059 .maxlen = sizeof(int), 7060 .mode = 0644, 7061 .proc_handler = proc_dointvec, 7062 }, 7063 { 7064 .procname = "ignore_routes_with_linkdown", 7065 .data = &ipv6_devconf.ignore_routes_with_linkdown, 7066 .maxlen = sizeof(int), 7067 .mode = 0644, 7068 
.proc_handler = addrconf_sysctl_ignore_routes_with_linkdown, 7069 }, 7070 { 7071 .procname = "drop_unicast_in_l2_multicast", 7072 .data = &ipv6_devconf.drop_unicast_in_l2_multicast, 7073 .maxlen = sizeof(int), 7074 .mode = 0644, 7075 .proc_handler = proc_dointvec, 7076 }, 7077 { 7078 .procname = "drop_unsolicited_na", 7079 .data = &ipv6_devconf.drop_unsolicited_na, 7080 .maxlen = sizeof(int), 7081 .mode = 0644, 7082 .proc_handler = proc_dointvec, 7083 }, 7084 { 7085 .procname = "keep_addr_on_down", 7086 .data = &ipv6_devconf.keep_addr_on_down, 7087 .maxlen = sizeof(int), 7088 .mode = 0644, 7089 .proc_handler = proc_dointvec, 7090 7091 }, 7092 { 7093 .procname = "seg6_enabled", 7094 .data = &ipv6_devconf.seg6_enabled, 7095 .maxlen = sizeof(int), 7096 .mode = 0644, 7097 .proc_handler = proc_dointvec, 7098 }, 7099 #ifdef CONFIG_IPV6_SEG6_HMAC 7100 { 7101 .procname = "seg6_require_hmac", 7102 .data = &ipv6_devconf.seg6_require_hmac, 7103 .maxlen = sizeof(int), 7104 .mode = 0644, 7105 .proc_handler = proc_dointvec, 7106 }, 7107 #endif 7108 { 7109 .procname = "enhanced_dad", 7110 .data = &ipv6_devconf.enhanced_dad, 7111 .maxlen = sizeof(int), 7112 .mode = 0644, 7113 .proc_handler = proc_dointvec, 7114 }, 7115 { 7116 .procname = "addr_gen_mode", 7117 .data = &ipv6_devconf.addr_gen_mode, 7118 .maxlen = sizeof(int), 7119 .mode = 0644, 7120 .proc_handler = addrconf_sysctl_addr_gen_mode, 7121 }, 7122 { 7123 .procname = "disable_policy", 7124 .data = &ipv6_devconf.disable_policy, 7125 .maxlen = sizeof(int), 7126 .mode = 0644, 7127 .proc_handler = addrconf_sysctl_disable_policy, 7128 }, 7129 { 7130 .procname = "ndisc_tclass", 7131 .data = &ipv6_devconf.ndisc_tclass, 7132 .maxlen = sizeof(int), 7133 .mode = 0644, 7134 .proc_handler = proc_dointvec_minmax, 7135 .extra1 = (void *)SYSCTL_ZERO, 7136 .extra2 = (void *)&two_five_five, 7137 }, 7138 { 7139 .procname = "rpl_seg_enabled", 7140 .data = &ipv6_devconf.rpl_seg_enabled, 7141 .maxlen = sizeof(int), 7142 .mode = 0644, 7143 .proc_handler = proc_dointvec, 7144 }, 7145 { 7146 .procname = "ioam6_enabled", 7147 .data = &ipv6_devconf.ioam6_enabled, 7148 .maxlen = sizeof(u8), 7149 .mode = 0644, 7150 .proc_handler = proc_dou8vec_minmax, 7151 .extra1 = (void *)SYSCTL_ZERO, 7152 .extra2 = (void *)SYSCTL_ONE, 7153 }, 7154 { 7155 .procname = "ioam6_id", 7156 .data = &ipv6_devconf.ioam6_id, 7157 .maxlen = sizeof(u32), 7158 .mode = 0644, 7159 .proc_handler = proc_douintvec_minmax, 7160 .extra1 = (void *)SYSCTL_ZERO, 7161 .extra2 = (void *)&ioam6_if_id_max, 7162 }, 7163 { 7164 .procname = "ioam6_id_wide", 7165 .data = &ipv6_devconf.ioam6_id_wide, 7166 .maxlen = sizeof(u32), 7167 .mode = 0644, 7168 .proc_handler = proc_douintvec, 7169 }, 7170 { 7171 .procname = "ndisc_evict_nocarrier", 7172 .data = &ipv6_devconf.ndisc_evict_nocarrier, 7173 .maxlen = sizeof(u8), 7174 .mode = 0644, 7175 .proc_handler = proc_dou8vec_minmax, 7176 .extra1 = (void *)SYSCTL_ZERO, 7177 .extra2 = (void *)SYSCTL_ONE, 7178 }, 7179 { 7180 .procname = "accept_untracked_na", 7181 .data = &ipv6_devconf.accept_untracked_na, 7182 .maxlen = sizeof(int), 7183 .mode = 0644, 7184 .proc_handler = proc_dointvec_minmax, 7185 .extra1 = SYSCTL_ZERO, 7186 .extra2 = SYSCTL_TWO, 7187 }, 7188 }; 7189 7190 static int __addrconf_sysctl_register(struct net *net, char *dev_name, 7191 struct inet6_dev *idev, struct ipv6_devconf *p) 7192 { 7193 size_t table_size = ARRAY_SIZE(addrconf_sysctl); 7194 int i, ifindex; 7195 struct ctl_table *table; 7196 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ]; 7197 7198 table = 
kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL_ACCOUNT); 7199 if (!table) 7200 goto out; 7201 7202 for (i = 0; i < table_size; i++) { 7203 table[i].data += (char *)p - (char *)&ipv6_devconf; 7204 /* If one of these is already set, then it is not safe to 7205 * overwrite either of them: this makes proc_dointvec_minmax 7206 * usable. 7207 */ 7208 if (!table[i].extra1 && !table[i].extra2) { 7209 table[i].extra1 = idev; /* embedded; no ref */ 7210 table[i].extra2 = net; 7211 } 7212 } 7213 7214 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name); 7215 7216 p->sysctl_header = register_net_sysctl_sz(net, path, table, 7217 table_size); 7218 if (!p->sysctl_header) 7219 goto free; 7220 7221 if (!strcmp(dev_name, "all")) 7222 ifindex = NETCONFA_IFINDEX_ALL; 7223 else if (!strcmp(dev_name, "default")) 7224 ifindex = NETCONFA_IFINDEX_DEFAULT; 7225 else 7226 ifindex = idev->dev->ifindex; 7227 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, 7228 ifindex, p); 7229 return 0; 7230 7231 free: 7232 kfree(table); 7233 out: 7234 return -ENOBUFS; 7235 } 7236 7237 static void __addrconf_sysctl_unregister(struct net *net, 7238 struct ipv6_devconf *p, int ifindex) 7239 { 7240 const struct ctl_table *table; 7241 7242 if (!p->sysctl_header) 7243 return; 7244 7245 table = p->sysctl_header->ctl_table_arg; 7246 unregister_net_sysctl_table(p->sysctl_header); 7247 p->sysctl_header = NULL; 7248 kfree(table); 7249 7250 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL); 7251 } 7252 7253 static int addrconf_sysctl_register(struct inet6_dev *idev) 7254 { 7255 int err; 7256 7257 if (!sysctl_dev_name_is_allowed(idev->dev->name)) 7258 return -EINVAL; 7259 7260 err = neigh_sysctl_register(idev->dev, idev->nd_parms, 7261 &ndisc_ifinfo_sysctl_change); 7262 if (err) 7263 return err; 7264 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name, 7265 idev, &idev->cnf); 7266 if (err) 7267 neigh_sysctl_unregister(idev->nd_parms); 7268 7269 return err; 7270 } 7271 7272 static void addrconf_sysctl_unregister(struct inet6_dev *idev) 7273 { 7274 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf, 7275 idev->dev->ifindex); 7276 neigh_sysctl_unregister(idev->nd_parms); 7277 } 7278 7279 7280 #endif 7281 7282 static int __net_init addrconf_init_net(struct net *net) 7283 { 7284 int err = -ENOMEM; 7285 struct ipv6_devconf *all, *dflt; 7286 7287 spin_lock_init(&net->ipv6.addrconf_hash_lock); 7288 INIT_DEFERRABLE_WORK(&net->ipv6.addr_chk_work, addrconf_verify_work); 7289 net->ipv6.inet6_addr_lst = kcalloc(IN6_ADDR_HSIZE, 7290 sizeof(struct hlist_head), 7291 GFP_KERNEL); 7292 if (!net->ipv6.inet6_addr_lst) 7293 goto err_alloc_addr; 7294 7295 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL); 7296 if (!all) 7297 goto err_alloc_all; 7298 7299 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); 7300 if (!dflt) 7301 goto err_alloc_dflt; 7302 7303 if (!net_eq(net, &init_net)) { 7304 switch (net_inherit_devconf()) { 7305 case 1: /* copy from init_net */ 7306 memcpy(all, init_net.ipv6.devconf_all, 7307 sizeof(ipv6_devconf)); 7308 memcpy(dflt, init_net.ipv6.devconf_dflt, 7309 sizeof(ipv6_devconf_dflt)); 7310 break; 7311 case 3: /* copy from the current netns */ 7312 memcpy(all, current->nsproxy->net_ns->ipv6.devconf_all, 7313 sizeof(ipv6_devconf)); 7314 memcpy(dflt, 7315 current->nsproxy->net_ns->ipv6.devconf_dflt, 7316 sizeof(ipv6_devconf_dflt)); 7317 break; 7318 case 0: 7319 case 2: 7320 /* use compiled values */ 7321 break; 7322 } 7323 } 7324 
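	/* net_inherit_devconf() above reflects the devconf_inherit_init_net
	 * sysctl: 1 copies conf/{all,default} from init_net, 3 copies them
	 * from the namespace that created this one, and 0 or 2 keep the
	 * compiled-in defaults.
	 */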
7325 /* these will be inherited by all namespaces */ 7326 dflt->autoconf = ipv6_defaults.autoconf; 7327 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; 7328 7329 dflt->stable_secret.initialized = false; 7330 all->stable_secret.initialized = false; 7331 7332 net->ipv6.devconf_all = all; 7333 net->ipv6.devconf_dflt = dflt; 7334 7335 #ifdef CONFIG_SYSCTL 7336 err = __addrconf_sysctl_register(net, "all", NULL, all); 7337 if (err < 0) 7338 goto err_reg_all; 7339 7340 err = __addrconf_sysctl_register(net, "default", NULL, dflt); 7341 if (err < 0) 7342 goto err_reg_dflt; 7343 #endif 7344 return 0; 7345 7346 #ifdef CONFIG_SYSCTL 7347 err_reg_dflt: 7348 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL); 7349 err_reg_all: 7350 kfree(dflt); 7351 net->ipv6.devconf_dflt = NULL; 7352 #endif 7353 err_alloc_dflt: 7354 kfree(all); 7355 net->ipv6.devconf_all = NULL; 7356 err_alloc_all: 7357 kfree(net->ipv6.inet6_addr_lst); 7358 err_alloc_addr: 7359 return err; 7360 } 7361 7362 static void __net_exit addrconf_exit_net(struct net *net) 7363 { 7364 int i; 7365 7366 #ifdef CONFIG_SYSCTL 7367 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt, 7368 NETCONFA_IFINDEX_DEFAULT); 7369 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all, 7370 NETCONFA_IFINDEX_ALL); 7371 #endif 7372 kfree(net->ipv6.devconf_dflt); 7373 net->ipv6.devconf_dflt = NULL; 7374 kfree(net->ipv6.devconf_all); 7375 net->ipv6.devconf_all = NULL; 7376 7377 cancel_delayed_work_sync(&net->ipv6.addr_chk_work); 7378 /* 7379 * Check hash table, then free it. 7380 */ 7381 for (i = 0; i < IN6_ADDR_HSIZE; i++) 7382 WARN_ON_ONCE(!hlist_empty(&net->ipv6.inet6_addr_lst[i])); 7383 7384 kfree(net->ipv6.inet6_addr_lst); 7385 net->ipv6.inet6_addr_lst = NULL; 7386 } 7387 7388 static struct pernet_operations addrconf_ops = { 7389 .init = addrconf_init_net, 7390 .exit = addrconf_exit_net, 7391 }; 7392 7393 static struct rtnl_af_ops inet6_ops __read_mostly = { 7394 .family = AF_INET6, 7395 .fill_link_af = inet6_fill_link_af, 7396 .get_link_af_size = inet6_get_link_af_size, 7397 .validate_link_af = inet6_validate_link_af, 7398 .set_link_af = inet6_set_link_af, 7399 }; 7400 7401 /* 7402 * Init / cleanup code 7403 */ 7404 7405 int __init addrconf_init(void) 7406 { 7407 struct inet6_dev *idev; 7408 int err; 7409 7410 err = ipv6_addr_label_init(); 7411 if (err < 0) { 7412 pr_crit("%s: cannot initialize default policy table: %d\n", 7413 __func__, err); 7414 goto out; 7415 } 7416 7417 err = register_pernet_subsys(&addrconf_ops); 7418 if (err < 0) 7419 goto out_addrlabel; 7420 7421 /* All works using addrconf_wq need to lock rtnl. 
*/ 7422 addrconf_wq = create_singlethread_workqueue("ipv6_addrconf"); 7423 if (!addrconf_wq) { 7424 err = -ENOMEM; 7425 goto out_nowq; 7426 } 7427 7428 rtnl_lock(); 7429 idev = ipv6_add_dev(blackhole_netdev); 7430 rtnl_unlock(); 7431 if (IS_ERR(idev)) { 7432 err = PTR_ERR(idev); 7433 goto errlo; 7434 } 7435 7436 ip6_route_init_special_entries(); 7437 7438 register_netdevice_notifier(&ipv6_dev_notf); 7439 7440 addrconf_verify(&init_net); 7441 7442 rtnl_af_register(&inet6_ops); 7443 7444 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK, 7445 NULL, inet6_dump_ifinfo, RTNL_FLAG_DUMP_UNLOCKED); 7446 if (err < 0) 7447 goto errout; 7448 7449 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR, 7450 inet6_rtm_newaddr, NULL, 0); 7451 if (err < 0) 7452 goto errout; 7453 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR, 7454 inet6_rtm_deladdr, NULL, 0); 7455 if (err < 0) 7456 goto errout; 7457 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR, 7458 inet6_rtm_getaddr, inet6_dump_ifaddr, 7459 RTNL_FLAG_DOIT_UNLOCKED | 7460 RTNL_FLAG_DUMP_UNLOCKED); 7461 if (err < 0) 7462 goto errout; 7463 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST, 7464 NULL, inet6_dump_ifmcaddr, 7465 RTNL_FLAG_DUMP_UNLOCKED); 7466 if (err < 0) 7467 goto errout; 7468 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST, 7469 NULL, inet6_dump_ifacaddr, 7470 RTNL_FLAG_DUMP_UNLOCKED); 7471 if (err < 0) 7472 goto errout; 7473 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF, 7474 inet6_netconf_get_devconf, 7475 inet6_netconf_dump_devconf, 7476 RTNL_FLAG_DOIT_UNLOCKED | 7477 RTNL_FLAG_DUMP_UNLOCKED); 7478 if (err < 0) 7479 goto errout; 7480 err = ipv6_addr_label_rtnl_register(); 7481 if (err < 0) 7482 goto errout; 7483 7484 return 0; 7485 errout: 7486 rtnl_unregister_all(PF_INET6); 7487 rtnl_af_unregister(&inet6_ops); 7488 unregister_netdevice_notifier(&ipv6_dev_notf); 7489 errlo: 7490 destroy_workqueue(addrconf_wq); 7491 out_nowq: 7492 unregister_pernet_subsys(&addrconf_ops); 7493 out_addrlabel: 7494 ipv6_addr_label_cleanup(); 7495 out: 7496 return err; 7497 } 7498 7499 void addrconf_cleanup(void) 7500 { 7501 struct net_device *dev; 7502 7503 unregister_netdevice_notifier(&ipv6_dev_notf); 7504 unregister_pernet_subsys(&addrconf_ops); 7505 ipv6_addr_label_cleanup(); 7506 7507 rtnl_af_unregister(&inet6_ops); 7508 7509 rtnl_lock(); 7510 7511 /* clean dev list */ 7512 for_each_netdev(&init_net, dev) { 7513 if (__in6_dev_get(dev) == NULL) 7514 continue; 7515 addrconf_ifdown(dev, true); 7516 } 7517 addrconf_ifdown(init_net.loopback_dev, true); 7518 7519 rtnl_unlock(); 7520 7521 destroy_workqueue(addrconf_wq); 7522 } 7523