/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>

#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};

static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
static int		 ip6_dst_gc(struct dst_ops *ops);

static int		ip6_pkt_discard(struct sk_buff *skb);
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int		ip6_pkt_prohibit(struct sk_buff *skb);
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void		ip6_link_failure(struct sk_buff *skb);
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		rt6_dst_from_metrics_check(struct rt6_info *rt);
static int		rt6_score_route(struct rt6_info *rt, int oif, int strict);
static size_t		rt6_nlmsg_size(struct rt6_info *rt);
static int		rt6_fill_node(struct net *net,
				      struct sk_buff *skb, struct rt6_info *rt,
				      struct in6_addr *dst, struct in6_addr *src,
				      int iif, int type, u32 portid, u32 seq,
				      unsigned int flags);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
					   unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev);
#endif

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

static void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

static void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}

static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
	return dst_metrics_write_ptr(rt->dst.from);
}

static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (rt->rt6i_flags & RTF_PCPU)
		return rt6_pcpu_cow_metrics(rt);
	else if (rt->rt6i_flags & RTF_CACHE)
		return NULL;
	else
		return dst_cow_metrics_generic(dst, old);
}

static inline const void *choose_neigh_daddr(struct rt6_info *rt,
					     struct sk_buff *skb,
					     const void *daddr)
{
	struct in6_addr *p = &rt->rt6i_gateway;

	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
					  struct sk_buff *skb,
					  const void *daddr)
{
	struct rt6_info *rt = (struct rt6_info *) dst;
	struct neighbour *n;

	daddr = choose_neigh_daddr(rt, skb, daddr);
	n = __ipv6_neigh_lookup(dst->dev, daddr);
	if (n)
		return n;
	return neigh_create(&nd_tbl, daddr, dst->dev);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt, NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}
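/* A note on the neighbour hooks above: choose_neigh_daddr() prefers the
 * route's gateway as the neighbour-discovery target, and only for
 * gatewayless (on-link) routes falls back to the packet's destination
 * address.  ip6_confirm_neigh() in turn skips devices that do not do
 * neighbour discovery (IFF_NOARP, loopback) and multicast targets, which
 * never have a neighbour entry to confirm.
 */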
static struct dst_ops ip6_dst_ops_template = {
	.family			= AF_INET6,
	.gc			= ip6_dst_gc,
	.gc_thresh		= 1024,
	.check			= ip6_dst_check,
	.default_advmss		= ip6_default_advmss,
	.mtu			= ip6_mtu,
	.cow_metrics		= ipv6_cow_metrics,
	.destroy		= ip6_dst_destroy,
	.ifdown			= ip6_dst_ifdown,
	.negative_advice	= ip6_negative_advice,
	.link_failure		= ip6_link_failure,
	.update_pmtu		= ip6_rt_update_pmtu,
	.redirect		= rt6_do_redirect,
	.local_out		= __ip6_local_out,
	.neigh_lookup		= ip6_neigh_lookup,
	.confirm_neigh		= ip6_confirm_neigh,
};

static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
	.family			= AF_INET6,
	.destroy		= ip6_dst_destroy,
	.check			= ip6_dst_check,
	.mtu			= ip6_blackhole_mtu,
	.default_advmss		= ip6_default_advmss,
	.update_pmtu		= ip6_rt_blackhole_update_pmtu,
	.redirect		= ip6_rt_blackhole_redirect,
	.cow_metrics		= dst_cow_metrics_generic,
	.neigh_lookup		= ip6_neigh_lookup,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#endif

static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_siblings);
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
					struct net_device *dev,
					int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt)
		rt6_info_init(rt);

	return rt;
}
struct rt6_info *ip6_dst_alloc(struct net *net,
			       struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);

	if (rt) {
		rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
		if (rt->rt6i_pcpu) {
			int cpu;

			for_each_possible_cpu(cpu) {
				struct rt6_info **p;

				p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
				/* no one shares rt */
				*p = NULL;
			}
		} else {
			dst_release_immediate(&rt->dst);
			return NULL;
		}
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct dst_entry *from = dst->from;
	struct inet6_dev *idev;

	dst_destroy_metrics_generic(dst);
	free_percpu(rt->rt6i_pcpu);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	dst->from = NULL;
	dst_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (dev != loopback_dev) {
		if (idev && idev->dev == dev) {
			struct inet6_dev *loopback_idev =
				in6_dev_get(loopback_dev);
			if (loopback_idev) {
				rt->rt6i_idev = loopback_idev;
				in6_dev_put(idev);
			}
		}
	}
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (rt->dst.from) {
		return rt6_check_expired((struct rt6_info *) rt->dst.from);
	}
	return false;
}

/* Multipath route selection:
 *   Hash-based function using the packet header and flow label.
 * Adapted from fib_info_hashfn()
 */
static int rt6_info_hash_nhsfn(unsigned int candidate_count,
			       const struct flowi6 *fl6)
{
	return get_hash_from_flowi6(fl6) % candidate_count;
}

static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
					     struct flowi6 *fl6, int oif,
					     int strict)
{
	struct rt6_info *sibling, *next_sibling;
	int route_choosen;

	route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
	/* Don't change the route if route_choosen == 0
	 * (the siblings list does not include ourself)
	 */
	if (route_choosen)
		list_for_each_entry_safe(sibling, next_sibling,
					 &match->rt6i_siblings, rt6i_siblings) {
			route_choosen--;
			if (route_choosen == 0) {
				if (rt6_score_route(sibling, oif, strict) < 0)
					break;
				match = sibling;
				break;
			}
		}
	return match;
}
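/* A worked example of the selection above: a route with two siblings
 * gives candidate_count == 3, so the flow hash is reduced to 0, 1 or 2.
 * 0 keeps @match (the route the lookup already found); k > 0 walks to
 * the k-th sibling, which is used only if rt6_score_route() does not
 * reject it - otherwise the original @match is kept.
 */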
/*
 *	Route lookup. Any table->tb6_lock is implied.
 */

static inline struct rt6_info *rt6_device_match(struct net *net,
						struct rt6_info *rt,
						const struct in6_addr *saddr,
						int oif,
						int flags)
{
	struct rt6_info *local = NULL;
	struct rt6_info *sprt;

	if (!oif && ipv6_addr_any(saddr))
		goto out;

	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
		struct net_device *dev = sprt->dst.dev;

		if (oif) {
			if (dev->ifindex == oif)
				return sprt;
			if (dev->flags & IFF_LOOPBACK) {
				if (!sprt->rt6i_idev ||
				    sprt->rt6i_idev->dev->ifindex != oif) {
					if (flags & RT6_LOOKUP_F_IFACE)
						continue;
					if (local &&
					    local->rt6i_idev->dev->ifindex == oif)
						continue;
				}
				local = sprt;
			}
		} else {
			if (ipv6_chk_addr(net, saddr, dev,
					  flags & RT6_LOOKUP_F_IFACE))
				return sprt;
		}
	}

	if (oif) {
		if (local)
			return local;

		if (flags & RT6_LOOKUP_F_IFACE)
			return net->ipv6.ip6_null_entry;
	}
out:
	return rt;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}
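/* The neighbour solicitation is sent from a workqueue on purpose:
 * rt6_probe() below runs under rcu_read_lock_bh() and may hold the
 * neighbour lock, where transmitting (and the allocations that implies)
 * is not safe.  rt6_probe() therefore only allocates and schedules the
 * work item; the NS itself goes out later from process context.
 */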
static void rt6_probe(struct rt6_info *rt)
{
	struct __rt6_probe_work *work;
	struct neighbour *neigh;
	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
		return;
	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		work = NULL;
		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated +
			       rt->rt6i_idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (work) {
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = rt->rt6i_gateway;
		dev_hold(rt->dst.dev);
		work->dev = rt->dst.dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct rt6_info *rt)
{
}
#endif

/*
 *	Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
{
	struct net_device *dev = rt->dst.dev;

	if (!oif || dev->ifindex == oif)
		return 2;
	if ((dev->flags & IFF_LOOPBACK) &&
	    rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
		return 1;
	return 0;
}

static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
	struct neighbour *neigh;
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;

	if (rt->rt6i_flags & RTF_NONEXTHOP ||
	    !(rt->rt6i_flags & RTF_GATEWAY))
		return RT6_NUD_SUCCEED;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}
static int rt6_score_route(struct rt6_info *rt, int oif, int strict)
{
	int m;

	m = rt6_check_dev(rt, oif);
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
	if (strict & RT6_LOOKUP_F_REACHABLE) {
		int n = rt6_check_neigh(rt);

		if (n < 0)
			return n;
	}
	return m;
}

static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
				   int *mpri, struct rt6_info *match,
				   bool *do_rr)
{
	int m;
	bool match_do_rr = false;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *dev = rt->dst.dev;

	if (dev && !netif_carrier_ok(dev) &&
	    idev->cnf.ignore_routes_with_linkdown &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	if (rt6_check_expired(rt))
		goto out;

	m = rt6_score_route(rt, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(rt);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		match = rt;
	}
out:
	return match;
}

static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
				     struct rt6_info *rr_head,
				     u32 metric, int oif, int strict,
				     bool *do_rr)
{
	struct rt6_info *rt, *match, *cont;
	int mpri = -1;

	match = NULL;
	cont = NULL;
	for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	if (match || !cont)
		return match;

	for (rt = cont; rt; rt = rt->dst.rt6_next)
		match = find_match(rt, oif, strict, &mpri, match, do_rr);

	return match;
}

static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
{
	struct rt6_info *match, *rt0;
	struct net *net;
	bool do_rr = false;

	rt0 = fn->rr_ptr;
	if (!rt0)
		fn->rr_ptr = rt0 = fn->leaf;

	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
			     &do_rr);

	if (do_rr) {
		struct rt6_info *next = rt0->dst.rt6_next;

		/* no entries matched; do round-robin */
		if (!next || next->rt6i_metric != rt0->rt6i_metric)
			next = fn->leaf;

		if (next != rt0)
			fn->rr_ptr = next;
	}

	net = dev_net(rt0->dst.dev);
	return match ? match : net->ipv6.ip6_null_entry;
}
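/* A rough sketch of the scoring used above: rt6_check_dev() contributes
 * 2 when the route's device matches the requested oif (or no oif was
 * given) and 1 for a loopback route paired with the oif; with
 * CONFIG_IPV6_ROUTER_PREF the RA-advertised preference is OR-ed in at
 * bit 2.  The highest score wins.  When the best candidate only scored
 * RT6_NUD_FAIL_DO_RR (neighbour unknown and router preference compiled
 * out), rt6_select() advances fn->rr_ptr to the next entry of the same
 * metric, giving the round-robin behaviour described in the changelog
 * at the top of this file.
 */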
static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
{
	return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct rt6_info *rt;

	if (len < sizeof(struct route_info))
		return -EINVAL;

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3)
		return -EINVAL;
	else if (rinfo->prefix_len > 128)
		return -EINVAL;
	else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2)
			return -EINVAL;
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1)
			return -EINVAL;
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->rt6i_flags = RTF_ROUTEINFO |
				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			rt6_clean_expires(rt);
		else
			rt6_set_expires(rt, jiffies + HZ * lifetime);

		ip6_rt_put(rt);
	}
	return 0;
}
#endif

static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn;

	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = fn->parent;
		if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
			fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	struct fib6_node *fn;
	struct rt6_info *rt;

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	rt = fn->leaf;
	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
		rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}
	dst_use(&rt->dst, jiffies);
	read_unlock_bh(&table->tb6_lock);

	trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);

	return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   int flags)
{
	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);
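/* rt6_lookup() below is the convenience wrapper most in-kernel callers
 * use: NULL on failure, otherwise a referenced rt6_info that must be
 * dropped with ip6_rt_put().  A minimal, illustrative call sequence:
 *
 *	struct rt6_info *rt = rt6_lookup(net, &daddr, NULL, 0, 0);
 *	if (rt) {
 *		... inspect rt ...
 *		ip6_rt_put(rt);
 *	}
 */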
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason,
 * the route is released.
 * The caller must hold dst before calling it.
 */
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
			struct mx6_config *mxc,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
	write_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct rt6_info *rt)
{
	struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
	struct mx6_config mxc = { .mx = NULL, };

	/* Hold dst to account for the reference from the fib6 tree */
	dst_hold(&rt->dst);
	return __ip6_ins_rt(rt, &info, &mxc, NULL);
}

static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = (struct rt6_info *)ort->dst.from;

	rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);

	if (!rt)
		return NULL;

	ip6_rt_copy_init(rt, ort);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_metric = 0;
	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(ort)) {
		if (ort->rt6i_dst.plen != 128 &&
		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt;

	pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
				  rt->dst.dev, rt->dst.flags);

	if (!pcpu_rt)
		return NULL;
	ip6_rt_copy_init(pcpu_rt, rt);
	pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}

/* It should be called with read_lock_bh(&tb6_lock) acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt, **p;

	p = this_cpu_ptr(rt->rt6i_pcpu);
	pcpu_rt = *p;

	if (pcpu_rt) {
		dst_hold(&pcpu_rt->dst);
		rt6_dst_from_metrics_check(pcpu_rt);
	}
	return pcpu_rt;
}
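/* rt6_make_pcpu_route() below publishes its clone with cmpxchg() so a
 * concurrent installer of the same per-cpu slot cannot be lost: the
 * loser of the race frees its copy and returns the winner's.  If
 * rt->rt6i_pcpu has gone away, the route was removed from the fib6 tree
 * before the read lock was taken; the clone is then dropped and @rt
 * itself returned, and the next dst_check() forces a re-lookup.
 */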
static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
{
	struct fib6_table *table = rt->rt6i_table;
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(rt);
	if (!pcpu_rt) {
		struct net *net = dev_net(rt->dst.dev);

		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	read_lock_bh(&table->tb6_lock);
	if (rt->rt6i_pcpu) {
		p = this_cpu_ptr(rt->rt6i_pcpu);
		prev = cmpxchg(p, NULL, pcpu_rt);
		if (prev) {
			/* If someone did it before us, return prev instead */
			dst_release_immediate(&pcpu_rt->dst);
			pcpu_rt = prev;
		}
	} else {
		/* rt has been removed from the fib6 tree
		 * before we have a chance to acquire the read_lock.
		 * In this case, don't bother to create a pcpu rt
		 * since rt is going away anyway.  The next
		 * dst_check() will trigger a re-lookup.
		 */
		dst_release_immediate(&pcpu_rt->dst);
		pcpu_rt = rt;
	}
	dst_hold(&pcpu_rt->dst);
	rt6_dst_from_metrics_check(pcpu_rt);
	read_unlock_bh(&table->tb6_lock);
	return pcpu_rt;
}

struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6, int flags)
{
	struct fib6_node *fn, *saved_fn;
	struct rt6_info *rt;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	read_lock_bh(&table->tb6_lock);

	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt = rt6_select(fn, oif, strict);
	if (rt->rt6i_nsiblings)
		rt = rt6_multipath_select(rt, fl6, oif, strict);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		rt6_dst_from_metrics_check(rt);

		trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !(rt->rt6i_flags & RTF_GATEWAY))) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */

		struct rt6_info *uncached_rt;

		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
		dst_release(&rt->dst);

		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during
			 * ip6_rt_cache_alloc(); no need for another dst_hold().
			 */
			rt6_uncached_list_add(uncached_rt);
		} else {
			uncached_rt = net->ipv6.ip6_null_entry;
			dst_hold(&uncached_rt->dst);
		}

		trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
		return uncached_rt;

	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		rt->dst.lastuse = jiffies;
		rt->dst.__use++;
		pcpu_rt = rt6_get_pcpu_route(rt);

		if (pcpu_rt) {
			read_unlock_bh(&table->tb6_lock);
		} else {
			/* We have to do the read_unlock first
			 * because rt6_make_pcpu_route() may trigger
			 * ip6_dst_gc() which will take the write_lock.
			 */
			dst_hold(&rt->dst);
			read_unlock_bh(&table->tb6_lock);
			pcpu_rt = rt6_make_pcpu_route(rt);
			dst_release(&rt->dst);
		}

		trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
		return pcpu_rt;
	}
}
EXPORT_SYMBOL_GPL(ip6_pol_route);

static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
					    struct flowi6 *fl6, int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6, int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
	skb_dst_drop(skb);
	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}

static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
}

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (rt6_need_strict(&fl6->daddr)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
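/* ip6_blackhole_route() below clones an existing route into a dst whose
 * input/output hooks simply discard packets while keeping the original
 * metrics and addressing.  It is registered as the IPv6 xfrm
 * blackhole_route callback, letting a flow be parked without a usable
 * route; the reference on @dst_orig is consumed either way.
 */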
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_NONE, 0);
	if (rt) {
		rt6_info_init(rt);

		new = &rt->dst;
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
		rt->rt6i_metric = 0;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

/*
 *	Destination cache support functions
 */

static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
	if (rt->dst.from &&
	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
		dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
}

static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
{
	if (!__rt6_check_expired(rt) &&
	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))
		return &rt->dst;
	else
		return NULL;
}

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rt6_info *rt;

	rt = (struct rt6_info *) dst;

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	rt6_dst_from_metrics_check(rt);

	if (rt->rt6i_flags & RTF_PCPU ||
	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
		return rt6_dst_from_check(rt, cookie);
	else
		return rt6_check(rt, cookie);
}

static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			if (rt6_check_expired(rt)) {
				ip6_del_rt(rt);
				dst = NULL;
			}
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			if (dst_hold_safe(&rt->dst))
				ip6_del_rt(rt);
		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
			rt->rt6i_node->fn_sernum = -1;
		}
	}
}

static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	rt->rt6i_flags |= RTF_MODIFIED;
	rt->rt6i_pmtu = mtu;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
	return !(rt->rt6i_flags & RTF_CACHE) &&
		(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
}
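/* PMTU handling follows two paths: an entry that may not be cloned
 * (already RTF_CACHE, or detached from the tree) is modified in place
 * by rt6_do_update_pmtu(), which sets RTF_MODIFIED and arms the
 * ip6_rt_mtu_expires timeout; a cacheable tree route instead gets an
 * RTF_CACHE clone carrying the learned MTU, inserted by
 * __ip6_rt_update_pmtu() below, so the original route stays untouched.
 */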
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (rt6->rt6i_flags & RTF_LOCAL)
		return;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
	} else if (daddr) {
		struct rt6_info *nrt6;

		nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);

			/* ip6_ins_rt(nrt6) will bump the
			 * rt6->rt6i_node->fn_sernum
			 * which will fail the next rt6_check() and
			 * invalidate the sk->sk_dst_cache.
			 */
			ip6_ins_rt(nrt6);
			/* Release the reference taken in
			 * ip6_rt_cache_alloc()
			 */
			dst_release(&nrt6->dst);
		}
	}
}

static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
}

void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	struct dst_entry *dst;

	ip6_update_pmtu(skb, sock_net(sk), mtu,
			sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

/* Handle redirects */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};
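/* struct ip6rd_flowi embeds the flowi6 as its first member so that
 * &rdfl.fl6 can be handed to fib6_rule_lookup() like any other flow and
 * cast back to ip6rd_flowi inside __ip6_route_redirect(), smuggling the
 * announcing gateway through an interface that only passes a flowi6.
 */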
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *rt;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from the appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		if (rt6_check_expired(rt))
			continue;
		if (rt->dst.error)
			break;
		if (!(rt->rt6i_flags & RTF_GATEWAY))
			continue;
		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
			continue;
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
			continue;
		break;
	}

	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	else if (rt->dst.error) {
		rt = net->ipv6.ip6_null_entry;
		goto out;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

out:
	dst_hold(&rt->dst);

	read_unlock_bh(&table->tb6_lock);

	trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
	return rt;
}

static struct dst_entry *ip6_route_redirect(struct net *net,
					    const struct flowi6 *fl6,
					    const struct in6_addr *gateway)
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

	return fib6_rule_lookup(net, &rdfl.fl6,
				flags, __ip6_route_redirect);
}

void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = msg->dest;
	fl6.saddr = iph->daddr;
	fl6.flowi6_uid = sock_net_uid(net, NULL);

	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);
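/* The entry points above all rebuild a flow from the packet that
 * triggered the redirect (ip6_redirect_no_header recovers the
 * destination from the rd_msg itself when no inner header is
 * available), re-run the lookup constrained to routes via the
 * announcing router, and let rt6_do_redirect() validate and apply
 * the result.
 */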
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}

static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	const struct rt6_info *rt = (const struct rt6_info *)dst;
	unsigned int mtu = rt->rt6i_pmtu;
	struct inet6_dev *idev;

	if (mtu)
		goto out;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.output = ip6_output;
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_ifdown() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}

static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout >> 1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire >> rt_elasticity;
	return entries > rt_max_size;
}
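/* The arithmetic at the end of ip6_dst_gc() is a slow exponential decay:
 * ip6_rt_gc_expire is bumped each time garbage collection actually runs,
 * reset to ip6_rt_gc_timeout/2 once the table shrinks below gc_thresh,
 * and on every call (including the rate-limited early exit) reduced by
 * expire >> elasticity - e.g. with an elasticity of 9, by expire/512.
 */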
static int ip6_convert_metrics(struct mx6_config *mxc,
			       const struct fib6_config *cfg)
{
	bool ecn_ca = false;
	struct nlattr *nla;
	int remaining;
	u32 *mp;

	if (!cfg->fc_mx)
		return 0;

	mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
	if (unlikely(!mp))
		return -ENOMEM;

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 val;

		if (!type)
			continue;
		if (unlikely(type > RTAX_MAX))
			goto err;

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
			if (val == TCP_CA_UNSPEC)
				goto err;
		} else {
			val = nla_get_u32(nla);
		}
		if (type == RTAX_HOPLIMIT && val > 255)
			val = 255;
		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
			goto err;

		mp[type - 1] = val;
		__set_bit(type - 1, mxc->mx_valid);
	}

	if (ecn_ca) {
		__set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
		mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
	}

	mxc->mx = mp;
	return 0;
err:
	kfree(mp);
	return -EINVAL;
}

static struct rt6_info *ip6_nh_lookup_table(struct net *net,
					    struct fib6_config *cfg,
					    const struct in6_addr *gw_addr)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	struct rt6_info *rt;
	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;

	table = fib6_get_table(net, cfg->fc_table);
	if (!table)
		return NULL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);

	/* if table lookup failed, fall back to full lookup */
	if (rt == net->ipv6.ip6_null_entry) {
		ip6_rt_put(rt);
		rt = NULL;
	}

	return rt;
}
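/* Gateway validation in ip6_route_info_create() below first resolves
 * the nexthop within the table the route is being added to (via
 * ip6_nh_lookup_table() above), discarding results that themselves
 * point at a gateway or live on a different device, and only then
 * falls back to a full rt6_lookup() across tables.
 */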
static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
					      struct netlink_ext_ack *extack)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct rt6_info *rt = NULL;
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	struct fib6_table *table;
	int addr_type;
	int err = -EINVAL;

	/* RTF_PCPU is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
		goto out;
	}

	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
		goto out;
	}
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
		goto out;
	}
#endif
	if (cfg->fc_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;

	err = -ENOBUFS;
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	rt = ip6_dst_alloc(net, NULL,
			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);

	if (!rt) {
		err = -ENOMEM;
		goto out;
	}

	if (cfg->fc_flags & RTF_EXPIRES)
		rt6_set_expires(rt, jiffies +
				clock_t_to_jiffies(cfg->fc_expires));
	else
		rt6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->rt6i_protocol = cfg->fc_protocol;

	addr_type = ipv6_addr_type(&cfg->fc_dst);

	if (addr_type & IPV6_ADDR_MULTICAST)
		rt->dst.input = ip6_mc_input;
	else if (cfg->fc_flags & RTF_LOCAL)
		rt->dst.input = ip6_input;
	else
		rt->dst.input = ip6_forward;

	rt->dst.output = ip6_output;

	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

		err = lwtunnel_build_state(cfg->fc_encap_type,
					   cfg->fc_encap, AF_INET6, cfg,
					   &lwtstate, extack);
		if (err)
			goto out;
		rt->dst.lwtstate = lwtstate_get(lwtstate);
		if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_output = rt->dst.output;
			rt->dst.output = lwtunnel_output;
		}
		if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_input = rt->dst.input;
			rt->dst.input = lwtunnel_input;
		}
	}

	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->rt6i_dst.plen = cfg->fc_dst_len;
	if (rt->rt6i_dst.plen == 128)
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;
#endif

	rt->rt6i_metric = cfg->fc_metric;

	/* We cannot add true routes via loopback here, they would
	 * result in kernel looping; promote them to reject routes
	 */
	if ((cfg->fc_flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
		switch (cfg->fc_type) {
		case RTN_BLACKHOLE:
			rt->dst.error = -EINVAL;
			rt->dst.output = dst_discard_out;
			rt->dst.input = dst_discard;
			break;
		case RTN_PROHIBIT:
			rt->dst.error = -EACCES;
			rt->dst.output = ip6_pkt_prohibit_out;
			rt->dst.input = ip6_pkt_prohibit;
			break;
		case RTN_THROW:
		case RTN_UNREACHABLE:
		default:
			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
					: (cfg->fc_type == RTN_UNREACHABLE)
					? -EHOSTUNREACH : -ENETUNREACH;
			rt->dst.output = ip6_pkt_discard_out;
			rt->dst.input = ip6_pkt_discard;
			break;
		}
		goto install_route;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		const struct in6_addr *gw_addr;
		int gwa_type;

		gw_addr = &cfg->fc_gateway;
		gwa_type = ipv6_addr_type(gw_addr);

		/* if gw_addr is local we will fail to detect this in case
		 * the address is still TENTATIVE (DAD in progress).
		 * rt6_lookup() will return the already-added prefix route
		 * via the interface that the prefix route was assigned to,
		 * which might be non-loopback.
		 */
		err = -EINVAL;
		if (ipv6_chk_addr_and_flags(net, gw_addr,
					    gwa_type & IPV6_ADDR_LINKLOCAL ?
					    dev : NULL, 0, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}
		rt->rt6i_gateway = *gw_addr;

		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
			struct rt6_info *grt = NULL;

			/* IPv6 strictly prohibits using non-link-local
			   addresses as nexthop addresses.
			   Otherwise, the router will not be able to send
			   redirects.  That is usually right, but in some
			   (rare!) circumstances (SIT, PtP, NBMA NOARP links)
			   it is handy to allow some exceptions. --ANK
			   We allow IPv4-mapped nexthops to support
			   RFC 4798-type addressing.
			 */
			if (!(gwa_type & (IPV6_ADDR_UNICAST |
					  IPV6_ADDR_MAPPED))) {
				NL_SET_ERR_MSG(extack,
					       "Invalid gateway address");
				goto out;
			}

			if (cfg->fc_table) {
				grt = ip6_nh_lookup_table(net, cfg, gw_addr);

				if (grt) {
					if (grt->rt6i_flags & RTF_GATEWAY ||
					    (dev && dev != grt->dst.dev)) {
						ip6_rt_put(grt);
						grt = NULL;
					}
				}
			}

			if (!grt)
				grt = rt6_lookup(net, gw_addr, NULL,
						 cfg->fc_ifindex, 1);

			err = -EHOSTUNREACH;
			if (!grt)
				goto out;
			if (dev) {
				if (dev != grt->dst.dev) {
					ip6_rt_put(grt);
					goto out;
				}
			} else {
				dev = grt->dst.dev;
				idev = grt->rt6i_idev;
				dev_hold(dev);
				in6_dev_hold(grt->rt6i_idev);
			}
			if (!(grt->rt6i_flags & RTF_GATEWAY))
				err = 0;
			ip6_rt_put(grt);

			if (err)
				goto out;
		}
		err = -EINVAL;
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Egress device not specified");
			goto out;
		} else if (dev->flags & IFF_LOOPBACK) {
			NL_SET_ERR_MSG(extack,
				       "Egress device can not be loopback device for this route");
			goto out;
		}
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			err = -EINVAL;
			goto out;
		}
		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
		rt->rt6i_prefsrc.plen = 128;
	} else
		rt->rt6i_prefsrc.plen = 0;

	rt->rt6i_flags = cfg->fc_flags;

install_route:
	rt->dst.dev = dev;
	rt->rt6i_idev = idev;
	rt->rt6i_table = table;

	cfg->fc_nlinfo.nl_net = dev_net(dev);

	return rt;
out:
	if (dev)
		dev_put(dev);
	if (idev)
		in6_dev_put(idev);
	if (rt)
		dst_release_immediate(&rt->dst);

	return ERR_PTR(err);
}

int ip6_route_add(struct fib6_config *cfg,
		  struct netlink_ext_ack *extack)
{
	struct mx6_config mxc = { .mx = NULL, };
	struct rt6_info *rt;
	int err;

	rt = ip6_route_info_create(cfg, extack);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto out;
	}

	err = ip6_convert_metrics(&mxc, cfg);
	if (err)
		goto out;

	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);

	kfree(mxc.mx);

	return err;
out:
	if (rt)
		dst_release_immediate(&rt->dst);

	return err;
}
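/* ip6_route_add() is deliberately two-phase: ip6_route_info_create()
 * builds and validates the rt6_info without touching the tree, then
 * ip6_convert_metrics() turns the netlink RTAX_* attributes into an
 * mx6_config that __ip6_ins_rt() applies under the table write lock.
 * The temporary metrics array is freed after the insert, whether or
 * not it succeeded.
 */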
2141 2142 out: 2143 ip6_rt_put(rt); 2144 return err; 2145 } 2146 2147 int ip6_del_rt(struct rt6_info *rt) 2148 { 2149 struct nl_info info = { 2150 .nl_net = dev_net(rt->dst.dev), 2151 }; 2152 return __ip6_del_rt(rt, &info); 2153 } 2154 2155 static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg) 2156 { 2157 struct nl_info *info = &cfg->fc_nlinfo; 2158 struct net *net = info->nl_net; 2159 struct sk_buff *skb = NULL; 2160 struct fib6_table *table; 2161 int err = -ENOENT; 2162 2163 if (rt == net->ipv6.ip6_null_entry) 2164 goto out_put; 2165 table = rt->rt6i_table; 2166 write_lock_bh(&table->tb6_lock); 2167 2168 if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) { 2169 struct rt6_info *sibling, *next_sibling; 2170 2171 /* prefer to send a single notification with all hops */ 2172 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 2173 if (skb) { 2174 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 2175 2176 if (rt6_fill_node(net, skb, rt, 2177 NULL, NULL, 0, RTM_DELROUTE, 2178 info->portid, seq, 0) < 0) { 2179 kfree_skb(skb); 2180 skb = NULL; 2181 } else 2182 info->skip_notify = 1; 2183 } 2184 2185 list_for_each_entry_safe(sibling, next_sibling, 2186 &rt->rt6i_siblings, 2187 rt6i_siblings) { 2188 err = fib6_del(sibling, info); 2189 if (err) 2190 goto out_unlock; 2191 } 2192 } 2193 2194 err = fib6_del(rt, info); 2195 out_unlock: 2196 write_unlock_bh(&table->tb6_lock); 2197 out_put: 2198 ip6_rt_put(rt); 2199 2200 if (skb) { 2201 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 2202 info->nlh, gfp_any()); 2203 } 2204 return err; 2205 } 2206 2207 static int ip6_route_del(struct fib6_config *cfg, 2208 struct netlink_ext_ack *extack) 2209 { 2210 struct fib6_table *table; 2211 struct fib6_node *fn; 2212 struct rt6_info *rt; 2213 int err = -ESRCH; 2214 2215 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table); 2216 if (!table) { 2217 NL_SET_ERR_MSG(extack, "FIB table does not exist"); 2218 return err; 2219 } 2220 2221 read_lock_bh(&table->tb6_lock); 2222 2223 fn = fib6_locate(&table->tb6_root, 2224 &cfg->fc_dst, cfg->fc_dst_len, 2225 &cfg->fc_src, cfg->fc_src_len); 2226 2227 if (fn) { 2228 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { 2229 if ((rt->rt6i_flags & RTF_CACHE) && 2230 !(cfg->fc_flags & RTF_CACHE)) 2231 continue; 2232 if (cfg->fc_ifindex && 2233 (!rt->dst.dev || 2234 rt->dst.dev->ifindex != cfg->fc_ifindex)) 2235 continue; 2236 if (cfg->fc_flags & RTF_GATEWAY && 2237 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) 2238 continue; 2239 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) 2240 continue; 2241 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol) 2242 continue; 2243 dst_hold(&rt->dst); 2244 read_unlock_bh(&table->tb6_lock); 2245 2246 /* if gateway was specified only delete the one hop */ 2247 if (cfg->fc_flags & RTF_GATEWAY) 2248 return __ip6_del_rt(rt, &cfg->fc_nlinfo); 2249 2250 return __ip6_del_rt_siblings(rt, cfg); 2251 } 2252 } 2253 read_unlock_bh(&table->tb6_lock); 2254 2255 return err; 2256 } 2257 2258 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) 2259 { 2260 struct netevent_redirect netevent; 2261 struct rt6_info *rt, *nrt = NULL; 2262 struct ndisc_options ndopts; 2263 struct inet6_dev *in6_dev; 2264 struct neighbour *neigh; 2265 struct rd_msg *msg; 2266 int optlen, on_link; 2267 u8 *lladdr; 2268 2269 optlen = skb_tail_pointer(skb) - skb_transport_header(skb); 2270 optlen -= sizeof(*msg); 2271 2272 if (optlen < 0) { 2273 net_dbg_ratelimited("rt6_do_redirect: packet too 
short\n"); 2274 return; 2275 } 2276 2277 msg = (struct rd_msg *)icmp6_hdr(skb); 2278 2279 if (ipv6_addr_is_multicast(&msg->dest)) { 2280 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n"); 2281 return; 2282 } 2283 2284 on_link = 0; 2285 if (ipv6_addr_equal(&msg->dest, &msg->target)) { 2286 on_link = 1; 2287 } else if (ipv6_addr_type(&msg->target) != 2288 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { 2289 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n"); 2290 return; 2291 } 2292 2293 in6_dev = __in6_dev_get(skb->dev); 2294 if (!in6_dev) 2295 return; 2296 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) 2297 return; 2298 2299 /* RFC2461 8.1: 2300 * The IP source address of the Redirect MUST be the same as the current 2301 * first-hop router for the specified ICMP Destination Address. 2302 */ 2303 2304 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) { 2305 net_dbg_ratelimited("rt6_redirect: invalid ND options\n"); 2306 return; 2307 } 2308 2309 lladdr = NULL; 2310 if (ndopts.nd_opts_tgt_lladdr) { 2311 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, 2312 skb->dev); 2313 if (!lladdr) { 2314 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n"); 2315 return; 2316 } 2317 } 2318 2319 rt = (struct rt6_info *) dst; 2320 if (rt->rt6i_flags & RTF_REJECT) { 2321 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n"); 2322 return; 2323 } 2324 2325 /* Redirect received -> path was valid. 2326 * Look, redirects are sent only in response to data packets, 2327 * so that this nexthop apparently is reachable. --ANK 2328 */ 2329 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr); 2330 2331 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1); 2332 if (!neigh) 2333 return; 2334 2335 /* 2336 * We have finally decided to accept it. 2337 */ 2338 2339 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE, 2340 NEIGH_UPDATE_F_WEAK_OVERRIDE| 2341 NEIGH_UPDATE_F_OVERRIDE| 2342 (on_link ? 
0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER| 2343 NEIGH_UPDATE_F_ISROUTER)), 2344 NDISC_REDIRECT, &ndopts); 2345 2346 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL); 2347 if (!nrt) 2348 goto out; 2349 2350 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE; 2351 if (on_link) 2352 nrt->rt6i_flags &= ~RTF_GATEWAY; 2353 2354 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 2355 2356 if (ip6_ins_rt(nrt)) 2357 goto out_release; 2358 2359 netevent.old = &rt->dst; 2360 netevent.new = &nrt->dst; 2361 netevent.daddr = &msg->dest; 2362 netevent.neigh = neigh; 2363 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 2364 2365 if (rt->rt6i_flags & RTF_CACHE) { 2366 rt = (struct rt6_info *) dst_clone(&rt->dst); 2367 ip6_del_rt(rt); 2368 } 2369 2370 out_release: 2371 /* Release the reference taken in 2372 * ip6_rt_cache_alloc() 2373 */ 2374 dst_release(&nrt->dst); 2375 2376 out: 2377 neigh_release(neigh); 2378 } 2379 2380 /* 2381 * Misc support functions 2382 */ 2383 2384 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) 2385 { 2386 BUG_ON(from->dst.from); 2387 2388 rt->rt6i_flags &= ~RTF_EXPIRES; 2389 dst_hold(&from->dst); 2390 rt->dst.from = &from->dst; 2391 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true); 2392 } 2393 2394 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort) 2395 { 2396 rt->dst.input = ort->dst.input; 2397 rt->dst.output = ort->dst.output; 2398 rt->rt6i_dst = ort->rt6i_dst; 2399 rt->dst.error = ort->dst.error; 2400 rt->rt6i_idev = ort->rt6i_idev; 2401 if (rt->rt6i_idev) 2402 in6_dev_hold(rt->rt6i_idev); 2403 rt->dst.lastuse = jiffies; 2404 rt->rt6i_gateway = ort->rt6i_gateway; 2405 rt->rt6i_flags = ort->rt6i_flags; 2406 rt6_set_from(rt, ort); 2407 rt->rt6i_metric = ort->rt6i_metric; 2408 #ifdef CONFIG_IPV6_SUBTREES 2409 rt->rt6i_src = ort->rt6i_src; 2410 #endif 2411 rt->rt6i_prefsrc = ort->rt6i_prefsrc; 2412 rt->rt6i_table = ort->rt6i_table; 2413 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate); 2414 } 2415 2416 #ifdef CONFIG_IPV6_ROUTE_INFO 2417 static struct rt6_info *rt6_get_route_info(struct net *net, 2418 const struct in6_addr *prefix, int prefixlen, 2419 const struct in6_addr *gwaddr, 2420 struct net_device *dev) 2421 { 2422 u32 tb_id = l3mdev_fib_table(dev) ? 
: RT6_TABLE_INFO; 2423 int ifindex = dev->ifindex; 2424 struct fib6_node *fn; 2425 struct rt6_info *rt = NULL; 2426 struct fib6_table *table; 2427
2428 table = fib6_get_table(net, tb_id); 2429 if (!table) 2430 return NULL; 2431
2432 read_lock_bh(&table->tb6_lock); 2433 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0); 2434 if (!fn) 2435 goto out; 2436
2437 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { 2438 if (rt->dst.dev->ifindex != ifindex) 2439 continue; 2440 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) 2441 continue; 2442 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr)) 2443 continue; 2444 dst_hold(&rt->dst); 2445 break; 2446 } 2447 out: 2448 read_unlock_bh(&table->tb6_lock); 2449 return rt; 2450 } 2451
2452 static struct rt6_info *rt6_add_route_info(struct net *net, 2453 const struct in6_addr *prefix, int prefixlen, 2454 const struct in6_addr *gwaddr, 2455 struct net_device *dev, 2456 unsigned int pref) 2457 { 2458 struct fib6_config cfg = { 2459 .fc_metric = IP6_RT_PRIO_USER, 2460 .fc_ifindex = dev->ifindex, 2461 .fc_dst_len = prefixlen, 2462 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 2463 RTF_UP | RTF_PREF(pref), 2464 .fc_nlinfo.portid = 0, 2465 .fc_nlinfo.nlh = NULL, 2466 .fc_nlinfo.nl_net = net, 2467 }; 2468
2469 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO; 2470 cfg.fc_dst = *prefix; 2471 cfg.fc_gateway = *gwaddr; 2472
2473 /* We should treat it as a default route if prefix length is 0. */ 2474 if (!prefixlen) 2475 cfg.fc_flags |= RTF_DEFAULT; 2476
2477 ip6_route_add(&cfg, NULL); 2478
2479 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); 2480 } 2481 #endif 2482
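/* rt6_get_route_info() and rt6_add_route_info() above back the Route
 * Information option learned from Router Advertisements (RFC 4191):
 * such routes are tagged RTF_ROUTEINFO | RTF_GATEWAY | RTF_ADDRCONF
 * and live in RT6_TABLE_INFO (or the l3mdev table, if any). The
 * helpers below do the same bookkeeping for RA-learned default
 * routers.
 */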
2483 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev) 2484 { 2485 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT; 2486 struct rt6_info *rt; 2487 struct fib6_table *table; 2488
2489 table = fib6_get_table(dev_net(dev), tb_id); 2490 if (!table) 2491 return NULL; 2492
2493 read_lock_bh(&table->tb6_lock); 2494 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { 2495 if (dev == rt->dst.dev && 2496 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && 2497 ipv6_addr_equal(&rt->rt6i_gateway, addr)) 2498 break; 2499 } 2500 if (rt) 2501 dst_hold(&rt->dst); 2502 read_unlock_bh(&table->tb6_lock); 2503 return rt; 2504 } 2505
2506 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr, 2507 struct net_device *dev, 2508 unsigned int pref) 2509 { 2510 struct fib6_config cfg = { 2511 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT, 2512 .fc_metric = IP6_RT_PRIO_USER, 2513 .fc_ifindex = dev->ifindex, 2514 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 2515 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 2516 .fc_nlinfo.portid = 0, 2517 .fc_nlinfo.nlh = NULL, 2518 .fc_nlinfo.nl_net = dev_net(dev), 2519 }; 2520
2521 cfg.fc_gateway = *gwaddr; 2522
2523 if (!ip6_route_add(&cfg, NULL)) { 2524 struct fib6_table *table; 2525
2526 table = fib6_get_table(dev_net(dev), cfg.fc_table); 2527 if (table) 2528 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER; 2529 } 2530
2531 return rt6_get_dflt_router(gwaddr, dev); 2532 } 2533
2534 static void __rt6_purge_dflt_routers(struct fib6_table *table) 2535 { 2536 struct rt6_info *rt; 2537
2538 restart: 2539 read_lock_bh(&table->tb6_lock); 2540 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { 2541 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) && 2542 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) { 2543 dst_hold(&rt->dst); 2544 read_unlock_bh(&table->tb6_lock); 2545 ip6_del_rt(rt); 2546 goto restart; 2547 } 2548 } 2549 read_unlock_bh(&table->tb6_lock); 2550
2551 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER; 2552 } 2553
2554 void rt6_purge_dflt_routers(struct net *net) 2555 { 2556 struct fib6_table *table; 2557 struct hlist_head *head; 2558 unsigned int h; 2559
2560 rcu_read_lock(); 2561
2562 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { 2563 head = &net->ipv6.fib_table_hash[h]; 2564 hlist_for_each_entry_rcu(table, head, tb6_hlist) { 2565 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER) 2566 __rt6_purge_dflt_routers(table); 2567 } 2568 } 2569
2570 rcu_read_unlock(); 2571 } 2572
2573 static void rtmsg_to_fib6_config(struct net *net, 2574 struct in6_rtmsg *rtmsg, 2575 struct fib6_config *cfg) 2576 { 2577 memset(cfg, 0, sizeof(*cfg)); 2578
2579 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
2580 : RT6_TABLE_MAIN; 2581 cfg->fc_ifindex = rtmsg->rtmsg_ifindex; 2582 cfg->fc_metric = rtmsg->rtmsg_metric; 2583 cfg->fc_expires = rtmsg->rtmsg_info; 2584 cfg->fc_dst_len = rtmsg->rtmsg_dst_len; 2585 cfg->fc_src_len = rtmsg->rtmsg_src_len; 2586 cfg->fc_flags = rtmsg->rtmsg_flags; 2587 2588 cfg->fc_nlinfo.nl_net = net; 2589 2590 cfg->fc_dst = rtmsg->rtmsg_dst; 2591 cfg->fc_src = rtmsg->rtmsg_src; 2592 cfg->fc_gateway = rtmsg->rtmsg_gateway; 2593 } 2594 2595 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg) 2596 { 2597 struct fib6_config cfg; 2598 struct in6_rtmsg rtmsg; 2599 int err; 2600 2601 switch (cmd) { 2602 case SIOCADDRT: /* Add a route */ 2603 case SIOCDELRT: /* Delete a route */ 2604 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2605 return -EPERM; 2606 err = copy_from_user(&rtmsg, arg, 2607 sizeof(struct in6_rtmsg)); 2608 if (err) 2609 return -EFAULT; 2610 2611 rtmsg_to_fib6_config(net, &rtmsg, &cfg); 2612 2613 rtnl_lock(); 2614 switch (cmd) { 2615 case SIOCADDRT: 2616 err = ip6_route_add(&cfg, NULL); 2617 break; 2618 case SIOCDELRT: 2619 err = ip6_route_del(&cfg, NULL); 2620 break; 2621 default: 2622 err = -EINVAL; 2623 } 2624 rtnl_unlock(); 2625 2626 return err; 2627 } 2628 2629 return -EINVAL; 2630 } 2631 2632 /* 2633 * Drop the packet on the floor 2634 */ 2635 2636 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes) 2637 { 2638 int type; 2639 struct dst_entry *dst = skb_dst(skb); 2640 switch (ipstats_mib_noroutes) { 2641 case IPSTATS_MIB_INNOROUTES: 2642 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); 2643 if (type == IPV6_ADDR_ANY) { 2644 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), 2645 IPSTATS_MIB_INADDRERRORS); 2646 break; 2647 } 2648 /* FALLTHROUGH */ 2649 case IPSTATS_MIB_OUTNOROUTES: 2650 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), 2651 ipstats_mib_noroutes); 2652 break; 2653 } 2654 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0); 2655 kfree_skb(skb); 2656 return 0; 2657 } 2658 2659 static int ip6_pkt_discard(struct sk_buff *skb) 2660 { 2661 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES); 2662 } 2663 2664 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) 2665 { 2666 skb->dev = skb_dst(skb)->dev; 2667 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); 2668 } 2669 2670 static int ip6_pkt_prohibit(struct sk_buff *skb) 2671 { 2672 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); 2673 } 2674 2675 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb) 2676 { 2677 skb->dev = skb_dst(skb)->dev; 2678 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); 2679 } 2680 2681 /* 2682 * Allocate a dst for local (unicast / anycast) address. 2683 */ 2684 2685 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, 2686 const struct in6_addr *addr, 2687 bool anycast) 2688 { 2689 u32 tb_id; 2690 struct net *net = dev_net(idev->dev); 2691 struct net_device *dev = net->loopback_dev; 2692 struct rt6_info *rt; 2693 2694 /* use L3 Master device as loopback for host routes if device 2695 * is enslaved and address is not link local or multicast 2696 */ 2697 if (!rt6_need_strict(addr)) 2698 dev = l3mdev_master_dev_rcu(idev->dev) ? 
: dev; 2699
2700 rt = ip6_dst_alloc(net, dev, DST_NOCOUNT); 2701 if (!rt) 2702 return ERR_PTR(-ENOMEM); 2703
2704 in6_dev_hold(idev); 2705
2706 rt->dst.flags |= DST_HOST; 2707 rt->dst.input = ip6_input; 2708 rt->dst.output = ip6_output; 2709 rt->rt6i_idev = idev; 2710
2711 rt->rt6i_protocol = RTPROT_KERNEL; 2712 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; 2713 if (anycast) 2714 rt->rt6i_flags |= RTF_ANYCAST; 2715 else 2716 rt->rt6i_flags |= RTF_LOCAL; 2717
2718 rt->rt6i_gateway = *addr; 2719 rt->rt6i_dst.addr = *addr; 2720 rt->rt6i_dst.plen = 128; 2721 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL; 2722 rt->rt6i_table = fib6_get_table(net, tb_id); 2723
2724 return rt; 2725 } 2726
2727 /* remove a deleted IP from prefsrc entries */ 2728 struct arg_dev_net_ip { 2729 struct net_device *dev; 2730 struct net *net; 2731 struct in6_addr *addr; 2732 }; 2733
2734 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg) 2735 { 2736 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev; 2737 struct net *net = ((struct arg_dev_net_ip *)arg)->net; 2738 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr; 2739
2740 if (((void *)rt->dst.dev == dev || !dev) && 2741 rt != net->ipv6.ip6_null_entry && 2742 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) { 2743 /* remove prefsrc entry */ 2744 rt->rt6i_prefsrc.plen = 0; 2745 } 2746 return 0; 2747 } 2748
2749 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp) 2750 { 2751 struct net *net = dev_net(ifp->idev->dev); 2752 struct arg_dev_net_ip adni = { 2753 .dev = ifp->idev->dev, 2754 .net = net, 2755 .addr = &ifp->addr, 2756 }; 2757 fib6_clean_all(net, fib6_remove_prefsrc, &adni); 2758 } 2759
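/* Walker contract for fib6_clean_all(): the callback is invoked for
 * every route in every FIB table of the netns; returning a negative
 * value deletes the route, returning 0 keeps it. fib6_remove_prefsrc()
 * above edits matching entries in place and always returns 0, while
 * fib6_clean_tohost() below returns -1 to delete matching routes.
 * A minimal sketch of a hypothetical walker (for illustration only,
 * not part of this file) that would drop every gatewayed route:
 *
 *	static int drop_gateways(struct rt6_info *rt, void *arg)
 *	{
 *		return (rt->rt6i_flags & RTF_GATEWAY) ? -1 : 0;
 *	}
 *
 *	fib6_clean_all(net, drop_gateways, NULL);
 */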
2760 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY) 2761 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE) 2762
2763 /* Remove routers and update dst entries when a gateway turns into a host. */ 2764 static int fib6_clean_tohost(struct rt6_info *rt, void *arg) 2765 { 2766 struct in6_addr *gateway = (struct in6_addr *)arg; 2767
2768 if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) || 2769 ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) && 2770 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) { 2771 return -1; 2772 } 2773 return 0; 2774 } 2775
2776 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway) 2777 { 2778 fib6_clean_all(net, fib6_clean_tohost, gateway); 2779 } 2780
2781 struct arg_dev_net { 2782 struct net_device *dev; 2783 struct net *net; 2784 }; 2785
2786 /* called with write lock held for table with rt */ 2787 static int fib6_ifdown(struct rt6_info *rt, void *arg) 2788 { 2789 const struct arg_dev_net *adn = arg; 2790 const struct net_device *dev = adn->dev; 2791
2792 if ((rt->dst.dev == dev || !dev) && 2793 rt != adn->net->ipv6.ip6_null_entry && 2794 (rt->rt6i_nsiblings == 0 || 2795 (dev && netdev_unregistering(dev)) || 2796 !rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) 2797 return -1; 2798
2799 return 0; 2800 } 2801
2802 void rt6_ifdown(struct net *net, struct net_device *dev) 2803 { 2804 struct arg_dev_net adn = { 2805 .dev = dev, 2806 .net = net, 2807 }; 2808
2809 fib6_clean_all(net, fib6_ifdown, &adn); 2810 if (dev) 2811 rt6_uncached_list_flush_dev(net, dev); 2812 } 2813
2814 struct rt6_mtu_change_arg { 2815 struct net_device *dev; 2816 unsigned int mtu; 2817 }; 2818
2819 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) 2820 { 2821 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; 2822 struct inet6_dev *idev; 2823
2824 /* In IPv6, PMTU discovery is not optional, 2825 so the RTAX_MTU lock cannot disable it. 2826 We still use this lock to block changes 2827 caused by addrconf/ndisc. 2828 */ 2829
2830 idev = __in6_dev_get(arg->dev); 2831 if (!idev) 2832 return 0; 2833
2834 /* For an administrative MTU increase, there is no way to discover 2835 an IPv6 PMTU increase, so the PMTU increase should be updated here. 2836 Since RFC 1981 doesn't cover administrative MTU increases, 2837 updating the PMTU on increase is a MUST (e.g. for jumbo frames). 2838 */ 2839 /* 2840 If the new MTU is less than the route PMTU, the new MTU will be the 2841 lowest MTU in the path; update the route PMTU to reflect the 2842 decrease. If the new MTU is greater than the route PMTU, and the 2843 old MTU was the lowest MTU in the path, update the route PMTU 2844 to reflect the increase. In this case, if another node's MTU is 2845 now the lowest on the path, a Packet Too Big message will trigger 2846 PMTU discovery again. 2847 */ 2848 if (rt->dst.dev == arg->dev && 2849 dst_metric_raw(&rt->dst, RTAX_MTU) && 2850 !dst_metric_locked(&rt->dst, RTAX_MTU)) { 2851 if (rt->rt6i_flags & RTF_CACHE) { 2852 /* For RTF_CACHE with rt6i_pmtu == 0 2853 * (i.e. a redirected route), 2854 * the metrics of its rt->dst.from have already 2855 * been updated.
2856 */ 2857 if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu) 2858 rt->rt6i_pmtu = arg->mtu; 2859 } else if (dst_mtu(&rt->dst) >= arg->mtu || 2860 (dst_mtu(&rt->dst) < arg->mtu && 2861 dst_mtu(&rt->dst) == idev->cnf.mtu6)) { 2862 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); 2863 } 2864 } 2865 return 0; 2866 } 2867 2868 void rt6_mtu_change(struct net_device *dev, unsigned int mtu) 2869 { 2870 struct rt6_mtu_change_arg arg = { 2871 .dev = dev, 2872 .mtu = mtu, 2873 }; 2874 2875 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg); 2876 } 2877 2878 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { 2879 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, 2880 [RTA_OIF] = { .type = NLA_U32 }, 2881 [RTA_IIF] = { .type = NLA_U32 }, 2882 [RTA_PRIORITY] = { .type = NLA_U32 }, 2883 [RTA_METRICS] = { .type = NLA_NESTED }, 2884 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, 2885 [RTA_PREF] = { .type = NLA_U8 }, 2886 [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, 2887 [RTA_ENCAP] = { .type = NLA_NESTED }, 2888 [RTA_EXPIRES] = { .type = NLA_U32 }, 2889 [RTA_UID] = { .type = NLA_U32 }, 2890 [RTA_MARK] = { .type = NLA_U32 }, 2891 }; 2892 2893 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, 2894 struct fib6_config *cfg, 2895 struct netlink_ext_ack *extack) 2896 { 2897 struct rtmsg *rtm; 2898 struct nlattr *tb[RTA_MAX+1]; 2899 unsigned int pref; 2900 int err; 2901 2902 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, 2903 NULL); 2904 if (err < 0) 2905 goto errout; 2906 2907 err = -EINVAL; 2908 rtm = nlmsg_data(nlh); 2909 memset(cfg, 0, sizeof(*cfg)); 2910 2911 cfg->fc_table = rtm->rtm_table; 2912 cfg->fc_dst_len = rtm->rtm_dst_len; 2913 cfg->fc_src_len = rtm->rtm_src_len; 2914 cfg->fc_flags = RTF_UP; 2915 cfg->fc_protocol = rtm->rtm_protocol; 2916 cfg->fc_type = rtm->rtm_type; 2917 2918 if (rtm->rtm_type == RTN_UNREACHABLE || 2919 rtm->rtm_type == RTN_BLACKHOLE || 2920 rtm->rtm_type == RTN_PROHIBIT || 2921 rtm->rtm_type == RTN_THROW) 2922 cfg->fc_flags |= RTF_REJECT; 2923 2924 if (rtm->rtm_type == RTN_LOCAL) 2925 cfg->fc_flags |= RTF_LOCAL; 2926 2927 if (rtm->rtm_flags & RTM_F_CLONED) 2928 cfg->fc_flags |= RTF_CACHE; 2929 2930 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid; 2931 cfg->fc_nlinfo.nlh = nlh; 2932 cfg->fc_nlinfo.nl_net = sock_net(skb->sk); 2933 2934 if (tb[RTA_GATEWAY]) { 2935 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]); 2936 cfg->fc_flags |= RTF_GATEWAY; 2937 } 2938 2939 if (tb[RTA_DST]) { 2940 int plen = (rtm->rtm_dst_len + 7) >> 3; 2941 2942 if (nla_len(tb[RTA_DST]) < plen) 2943 goto errout; 2944 2945 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen); 2946 } 2947 2948 if (tb[RTA_SRC]) { 2949 int plen = (rtm->rtm_src_len + 7) >> 3; 2950 2951 if (nla_len(tb[RTA_SRC]) < plen) 2952 goto errout; 2953 2954 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen); 2955 } 2956 2957 if (tb[RTA_PREFSRC]) 2958 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]); 2959 2960 if (tb[RTA_OIF]) 2961 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]); 2962 2963 if (tb[RTA_PRIORITY]) 2964 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]); 2965 2966 if (tb[RTA_METRICS]) { 2967 cfg->fc_mx = nla_data(tb[RTA_METRICS]); 2968 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]); 2969 } 2970 2971 if (tb[RTA_TABLE]) 2972 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]); 2973 2974 if (tb[RTA_MULTIPATH]) { 2975 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); 2976 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); 2977 2978 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp, 2979 cfg->fc_mp_len, extack); 2980 if (err 
< 0) 2981 goto errout; 2982 } 2983 2984 if (tb[RTA_PREF]) { 2985 pref = nla_get_u8(tb[RTA_PREF]); 2986 if (pref != ICMPV6_ROUTER_PREF_LOW && 2987 pref != ICMPV6_ROUTER_PREF_HIGH) 2988 pref = ICMPV6_ROUTER_PREF_MEDIUM; 2989 cfg->fc_flags |= RTF_PREF(pref); 2990 } 2991 2992 if (tb[RTA_ENCAP]) 2993 cfg->fc_encap = tb[RTA_ENCAP]; 2994 2995 if (tb[RTA_ENCAP_TYPE]) { 2996 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); 2997 2998 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack); 2999 if (err < 0) 3000 goto errout; 3001 } 3002 3003 if (tb[RTA_EXPIRES]) { 3004 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ); 3005 3006 if (addrconf_finite_timeout(timeout)) { 3007 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ); 3008 cfg->fc_flags |= RTF_EXPIRES; 3009 } 3010 } 3011 3012 err = 0; 3013 errout: 3014 return err; 3015 } 3016 3017 struct rt6_nh { 3018 struct rt6_info *rt6_info; 3019 struct fib6_config r_cfg; 3020 struct mx6_config mxc; 3021 struct list_head next; 3022 }; 3023 3024 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) 3025 { 3026 struct rt6_nh *nh; 3027 3028 list_for_each_entry(nh, rt6_nh_list, next) { 3029 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n", 3030 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway, 3031 nh->r_cfg.fc_ifindex); 3032 } 3033 } 3034 3035 static int ip6_route_info_append(struct list_head *rt6_nh_list, 3036 struct rt6_info *rt, struct fib6_config *r_cfg) 3037 { 3038 struct rt6_nh *nh; 3039 struct rt6_info *rtnh; 3040 int err = -EEXIST; 3041 3042 list_for_each_entry(nh, rt6_nh_list, next) { 3043 /* check if rt6_info already exists */ 3044 rtnh = nh->rt6_info; 3045 3046 if (rtnh->dst.dev == rt->dst.dev && 3047 rtnh->rt6i_idev == rt->rt6i_idev && 3048 ipv6_addr_equal(&rtnh->rt6i_gateway, 3049 &rt->rt6i_gateway)) 3050 return err; 3051 } 3052 3053 nh = kzalloc(sizeof(*nh), GFP_KERNEL); 3054 if (!nh) 3055 return -ENOMEM; 3056 nh->rt6_info = rt; 3057 err = ip6_convert_metrics(&nh->mxc, r_cfg); 3058 if (err) { 3059 kfree(nh); 3060 return err; 3061 } 3062 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); 3063 list_add_tail(&nh->next, rt6_nh_list); 3064 3065 return 0; 3066 } 3067 3068 static void ip6_route_mpath_notify(struct rt6_info *rt, 3069 struct rt6_info *rt_last, 3070 struct nl_info *info, 3071 __u16 nlflags) 3072 { 3073 /* if this is an APPEND route, then rt points to the first route 3074 * inserted and rt_last points to last route inserted. Userspace 3075 * wants a consistent dump of the route which starts at the first 3076 * nexthop. 
Since sibling routes are always added at the end of 3077 * the list, find the first sibling of the last route appended 3078 */ 3079 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) { 3080 rt = list_first_entry(&rt_last->rt6i_siblings, 3081 struct rt6_info, 3082 rt6i_siblings); 3083 } 3084 3085 if (rt) 3086 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); 3087 } 3088 3089 static int ip6_route_multipath_add(struct fib6_config *cfg, 3090 struct netlink_ext_ack *extack) 3091 { 3092 struct rt6_info *rt_notif = NULL, *rt_last = NULL; 3093 struct nl_info *info = &cfg->fc_nlinfo; 3094 struct fib6_config r_cfg; 3095 struct rtnexthop *rtnh; 3096 struct rt6_info *rt; 3097 struct rt6_nh *err_nh; 3098 struct rt6_nh *nh, *nh_safe; 3099 __u16 nlflags; 3100 int remaining; 3101 int attrlen; 3102 int err = 1; 3103 int nhn = 0; 3104 int replace = (cfg->fc_nlinfo.nlh && 3105 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE)); 3106 LIST_HEAD(rt6_nh_list); 3107 3108 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE; 3109 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND) 3110 nlflags |= NLM_F_APPEND; 3111 3112 remaining = cfg->fc_mp_len; 3113 rtnh = (struct rtnexthop *)cfg->fc_mp; 3114 3115 /* Parse a Multipath Entry and build a list (rt6_nh_list) of 3116 * rt6_info structs per nexthop 3117 */ 3118 while (rtnh_ok(rtnh, remaining)) { 3119 memcpy(&r_cfg, cfg, sizeof(*cfg)); 3120 if (rtnh->rtnh_ifindex) 3121 r_cfg.fc_ifindex = rtnh->rtnh_ifindex; 3122 3123 attrlen = rtnh_attrlen(rtnh); 3124 if (attrlen > 0) { 3125 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 3126 3127 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 3128 if (nla) { 3129 r_cfg.fc_gateway = nla_get_in6_addr(nla); 3130 r_cfg.fc_flags |= RTF_GATEWAY; 3131 } 3132 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); 3133 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); 3134 if (nla) 3135 r_cfg.fc_encap_type = nla_get_u16(nla); 3136 } 3137 3138 rt = ip6_route_info_create(&r_cfg, extack); 3139 if (IS_ERR(rt)) { 3140 err = PTR_ERR(rt); 3141 rt = NULL; 3142 goto cleanup; 3143 } 3144 3145 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg); 3146 if (err) { 3147 dst_release_immediate(&rt->dst); 3148 goto cleanup; 3149 } 3150 3151 rtnh = rtnh_next(rtnh, &remaining); 3152 } 3153 3154 /* for add and replace send one notification with all nexthops. 3155 * Skip the notification in fib6_add_rt2node and send one with 3156 * the full route when done 3157 */ 3158 info->skip_notify = 1; 3159 3160 err_nh = NULL; 3161 list_for_each_entry(nh, &rt6_nh_list, next) { 3162 rt_last = nh->rt6_info; 3163 err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack); 3164 /* save reference to first route for notification */ 3165 if (!rt_notif && !err) 3166 rt_notif = nh->rt6_info; 3167 3168 /* nh->rt6_info is used or freed at this point, reset to NULL*/ 3169 nh->rt6_info = NULL; 3170 if (err) { 3171 if (replace && nhn) 3172 ip6_print_replace_route_err(&rt6_nh_list); 3173 err_nh = nh; 3174 goto add_errout; 3175 } 3176 3177 /* Because each route is added like a single route we remove 3178 * these flags after the first nexthop: if there is a collision, 3179 * we have already failed to add the first nexthop: 3180 * fib6_add_rt2node() has rejected it; when replacing, old 3181 * nexthops have been replaced by first new, the rest should 3182 * be added to it. 3183 */ 3184 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | 3185 NLM_F_REPLACE); 3186 nhn++; 3187 } 3188 3189 /* success ... 
tell user about new route */ 3190 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags); 3191 goto cleanup; 3192 3193 add_errout: 3194 /* send notification for routes that were added so that 3195 * the delete notifications sent by ip6_route_del are 3196 * coherent 3197 */ 3198 if (rt_notif) 3199 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags); 3200 3201 /* Delete routes that were already added */ 3202 list_for_each_entry(nh, &rt6_nh_list, next) { 3203 if (err_nh == nh) 3204 break; 3205 ip6_route_del(&nh->r_cfg, extack); 3206 } 3207 3208 cleanup: 3209 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { 3210 if (nh->rt6_info) 3211 dst_release_immediate(&nh->rt6_info->dst); 3212 kfree(nh->mxc.mx); 3213 list_del(&nh->next); 3214 kfree(nh); 3215 } 3216 3217 return err; 3218 } 3219 3220 static int ip6_route_multipath_del(struct fib6_config *cfg, 3221 struct netlink_ext_ack *extack) 3222 { 3223 struct fib6_config r_cfg; 3224 struct rtnexthop *rtnh; 3225 int remaining; 3226 int attrlen; 3227 int err = 1, last_err = 0; 3228 3229 remaining = cfg->fc_mp_len; 3230 rtnh = (struct rtnexthop *)cfg->fc_mp; 3231 3232 /* Parse a Multipath Entry */ 3233 while (rtnh_ok(rtnh, remaining)) { 3234 memcpy(&r_cfg, cfg, sizeof(*cfg)); 3235 if (rtnh->rtnh_ifindex) 3236 r_cfg.fc_ifindex = rtnh->rtnh_ifindex; 3237 3238 attrlen = rtnh_attrlen(rtnh); 3239 if (attrlen > 0) { 3240 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 3241 3242 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 3243 if (nla) { 3244 nla_memcpy(&r_cfg.fc_gateway, nla, 16); 3245 r_cfg.fc_flags |= RTF_GATEWAY; 3246 } 3247 } 3248 err = ip6_route_del(&r_cfg, extack); 3249 if (err) 3250 last_err = err; 3251 3252 rtnh = rtnh_next(rtnh, &remaining); 3253 } 3254 3255 return last_err; 3256 } 3257 3258 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, 3259 struct netlink_ext_ack *extack) 3260 { 3261 struct fib6_config cfg; 3262 int err; 3263 3264 err = rtm_to_fib6_config(skb, nlh, &cfg, extack); 3265 if (err < 0) 3266 return err; 3267 3268 if (cfg.fc_mp) 3269 return ip6_route_multipath_del(&cfg, extack); 3270 else { 3271 cfg.fc_delete_all_nh = 1; 3272 return ip6_route_del(&cfg, extack); 3273 } 3274 } 3275 3276 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, 3277 struct netlink_ext_ack *extack) 3278 { 3279 struct fib6_config cfg; 3280 int err; 3281 3282 err = rtm_to_fib6_config(skb, nlh, &cfg, extack); 3283 if (err < 0) 3284 return err; 3285 3286 if (cfg.fc_mp) 3287 return ip6_route_multipath_add(&cfg, extack); 3288 else 3289 return ip6_route_add(&cfg, extack); 3290 } 3291 3292 static size_t rt6_nlmsg_size(struct rt6_info *rt) 3293 { 3294 int nexthop_len = 0; 3295 3296 if (rt->rt6i_nsiblings) { 3297 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ 3298 + NLA_ALIGN(sizeof(struct rtnexthop)) 3299 + nla_total_size(16) /* RTA_GATEWAY */ 3300 + lwtunnel_get_encap_size(rt->dst.lwtstate); 3301 3302 nexthop_len *= rt->rt6i_nsiblings; 3303 } 3304 3305 return NLMSG_ALIGN(sizeof(struct rtmsg)) 3306 + nla_total_size(16) /* RTA_SRC */ 3307 + nla_total_size(16) /* RTA_DST */ 3308 + nla_total_size(16) /* RTA_GATEWAY */ 3309 + nla_total_size(16) /* RTA_PREFSRC */ 3310 + nla_total_size(4) /* RTA_TABLE */ 3311 + nla_total_size(4) /* RTA_IIF */ 3312 + nla_total_size(4) /* RTA_OIF */ 3313 + nla_total_size(4) /* RTA_PRIORITY */ 3314 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */ 3315 + nla_total_size(sizeof(struct rta_cacheinfo)) 3316 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ 3317 + nla_total_size(1) /* 
RTA_PREF */ 3318 + lwtunnel_get_encap_size(rt->dst.lwtstate) 3319 + nexthop_len; 3320 } 3321 3322 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, 3323 unsigned int *flags, bool skip_oif) 3324 { 3325 if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) { 3326 *flags |= RTNH_F_LINKDOWN; 3327 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) 3328 *flags |= RTNH_F_DEAD; 3329 } 3330 3331 if (rt->rt6i_flags & RTF_GATEWAY) { 3332 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0) 3333 goto nla_put_failure; 3334 } 3335 3336 /* not needed for multipath encoding b/c it has a rtnexthop struct */ 3337 if (!skip_oif && rt->dst.dev && 3338 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) 3339 goto nla_put_failure; 3340 3341 if (rt->dst.lwtstate && 3342 lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0) 3343 goto nla_put_failure; 3344 3345 return 0; 3346 3347 nla_put_failure: 3348 return -EMSGSIZE; 3349 } 3350 3351 /* add multipath next hop */ 3352 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) 3353 { 3354 struct rtnexthop *rtnh; 3355 unsigned int flags = 0; 3356 3357 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); 3358 if (!rtnh) 3359 goto nla_put_failure; 3360 3361 rtnh->rtnh_hops = 0; 3362 rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0; 3363 3364 if (rt6_nexthop_info(skb, rt, &flags, true) < 0) 3365 goto nla_put_failure; 3366 3367 rtnh->rtnh_flags = flags; 3368 3369 /* length of rtnetlink header + attributes */ 3370 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh; 3371 3372 return 0; 3373 3374 nla_put_failure: 3375 return -EMSGSIZE; 3376 } 3377 3378 static int rt6_fill_node(struct net *net, 3379 struct sk_buff *skb, struct rt6_info *rt, 3380 struct in6_addr *dst, struct in6_addr *src, 3381 int iif, int type, u32 portid, u32 seq, 3382 unsigned int flags) 3383 { 3384 u32 metrics[RTAX_MAX]; 3385 struct rtmsg *rtm; 3386 struct nlmsghdr *nlh; 3387 long expires; 3388 u32 table; 3389 3390 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); 3391 if (!nlh) 3392 return -EMSGSIZE; 3393 3394 rtm = nlmsg_data(nlh); 3395 rtm->rtm_family = AF_INET6; 3396 rtm->rtm_dst_len = rt->rt6i_dst.plen; 3397 rtm->rtm_src_len = rt->rt6i_src.plen; 3398 rtm->rtm_tos = 0; 3399 if (rt->rt6i_table) 3400 table = rt->rt6i_table->tb6_id; 3401 else 3402 table = RT6_TABLE_UNSPEC; 3403 rtm->rtm_table = table; 3404 if (nla_put_u32(skb, RTA_TABLE, table)) 3405 goto nla_put_failure; 3406 if (rt->rt6i_flags & RTF_REJECT) { 3407 switch (rt->dst.error) { 3408 case -EINVAL: 3409 rtm->rtm_type = RTN_BLACKHOLE; 3410 break; 3411 case -EACCES: 3412 rtm->rtm_type = RTN_PROHIBIT; 3413 break; 3414 case -EAGAIN: 3415 rtm->rtm_type = RTN_THROW; 3416 break; 3417 default: 3418 rtm->rtm_type = RTN_UNREACHABLE; 3419 break; 3420 } 3421 } 3422 else if (rt->rt6i_flags & RTF_LOCAL) 3423 rtm->rtm_type = RTN_LOCAL; 3424 else if (rt->rt6i_flags & RTF_ANYCAST) 3425 rtm->rtm_type = RTN_ANYCAST; 3426 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) 3427 rtm->rtm_type = RTN_LOCAL; 3428 else 3429 rtm->rtm_type = RTN_UNICAST; 3430 rtm->rtm_flags = 0; 3431 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 3432 rtm->rtm_protocol = rt->rt6i_protocol; 3433 if (rt->rt6i_flags & RTF_DYNAMIC) 3434 rtm->rtm_protocol = RTPROT_REDIRECT; 3435 else if (rt->rt6i_flags & RTF_ADDRCONF) { 3436 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO)) 3437 rtm->rtm_protocol = RTPROT_RA; 3438 else 3439 rtm->rtm_protocol = RTPROT_KERNEL; 3440 } 3441 3442 if (rt->rt6i_flags & RTF_CACHE) 3443 rtm->rtm_flags 
|= RTM_F_CLONED; 3444 3445 if (dst) { 3446 if (nla_put_in6_addr(skb, RTA_DST, dst)) 3447 goto nla_put_failure; 3448 rtm->rtm_dst_len = 128; 3449 } else if (rtm->rtm_dst_len) 3450 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr)) 3451 goto nla_put_failure; 3452 #ifdef CONFIG_IPV6_SUBTREES 3453 if (src) { 3454 if (nla_put_in6_addr(skb, RTA_SRC, src)) 3455 goto nla_put_failure; 3456 rtm->rtm_src_len = 128; 3457 } else if (rtm->rtm_src_len && 3458 nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr)) 3459 goto nla_put_failure; 3460 #endif 3461 if (iif) { 3462 #ifdef CONFIG_IPV6_MROUTE 3463 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { 3464 int err = ip6mr_get_route(net, skb, rtm, portid); 3465 3466 if (err == 0) 3467 return 0; 3468 if (err < 0) 3469 goto nla_put_failure; 3470 } else 3471 #endif 3472 if (nla_put_u32(skb, RTA_IIF, iif)) 3473 goto nla_put_failure; 3474 } else if (dst) { 3475 struct in6_addr saddr_buf; 3476 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 && 3477 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 3478 goto nla_put_failure; 3479 } 3480 3481 if (rt->rt6i_prefsrc.plen) { 3482 struct in6_addr saddr_buf; 3483 saddr_buf = rt->rt6i_prefsrc.addr; 3484 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 3485 goto nla_put_failure; 3486 } 3487 3488 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); 3489 if (rt->rt6i_pmtu) 3490 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu; 3491 if (rtnetlink_put_metrics(skb, metrics) < 0) 3492 goto nla_put_failure; 3493 3494 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric)) 3495 goto nla_put_failure; 3496 3497 /* For multipath routes, walk the siblings list and add 3498 * each as a nexthop within RTA_MULTIPATH. 3499 */ 3500 if (rt->rt6i_nsiblings) { 3501 struct rt6_info *sibling, *next_sibling; 3502 struct nlattr *mp; 3503 3504 mp = nla_nest_start(skb, RTA_MULTIPATH); 3505 if (!mp) 3506 goto nla_put_failure; 3507 3508 if (rt6_add_nexthop(skb, rt) < 0) 3509 goto nla_put_failure; 3510 3511 list_for_each_entry_safe(sibling, next_sibling, 3512 &rt->rt6i_siblings, rt6i_siblings) { 3513 if (rt6_add_nexthop(skb, sibling) < 0) 3514 goto nla_put_failure; 3515 } 3516 3517 nla_nest_end(skb, mp); 3518 } else { 3519 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0) 3520 goto nla_put_failure; 3521 } 3522 3523 expires = (rt->rt6i_flags & RTF_EXPIRES) ? 
rt->dst.expires - jiffies : 0; 3524 3525 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0) 3526 goto nla_put_failure; 3527 3528 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) 3529 goto nla_put_failure; 3530 3531 3532 nlmsg_end(skb, nlh); 3533 return 0; 3534 3535 nla_put_failure: 3536 nlmsg_cancel(skb, nlh); 3537 return -EMSGSIZE; 3538 } 3539 3540 int rt6_dump_route(struct rt6_info *rt, void *p_arg) 3541 { 3542 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg; 3543 struct net *net = arg->net; 3544 3545 if (rt == net->ipv6.ip6_null_entry) 3546 return 0; 3547 3548 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) { 3549 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh); 3550 3551 /* user wants prefix routes only */ 3552 if (rtm->rtm_flags & RTM_F_PREFIX && 3553 !(rt->rt6i_flags & RTF_PREFIX_RT)) { 3554 /* success since this is not a prefix route */ 3555 return 1; 3556 } 3557 } 3558 3559 return rt6_fill_node(net, 3560 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, 3561 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq, 3562 NLM_F_MULTI); 3563 } 3564 3565 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, 3566 struct netlink_ext_ack *extack) 3567 { 3568 struct net *net = sock_net(in_skb->sk); 3569 struct nlattr *tb[RTA_MAX+1]; 3570 int err, iif = 0, oif = 0; 3571 struct dst_entry *dst; 3572 struct rt6_info *rt; 3573 struct sk_buff *skb; 3574 struct rtmsg *rtm; 3575 struct flowi6 fl6; 3576 bool fibmatch; 3577 3578 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, 3579 extack); 3580 if (err < 0) 3581 goto errout; 3582 3583 err = -EINVAL; 3584 memset(&fl6, 0, sizeof(fl6)); 3585 rtm = nlmsg_data(nlh); 3586 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0); 3587 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH); 3588 3589 if (tb[RTA_SRC]) { 3590 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) 3591 goto errout; 3592 3593 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]); 3594 } 3595 3596 if (tb[RTA_DST]) { 3597 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr)) 3598 goto errout; 3599 3600 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]); 3601 } 3602 3603 if (tb[RTA_IIF]) 3604 iif = nla_get_u32(tb[RTA_IIF]); 3605 3606 if (tb[RTA_OIF]) 3607 oif = nla_get_u32(tb[RTA_OIF]); 3608 3609 if (tb[RTA_MARK]) 3610 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]); 3611 3612 if (tb[RTA_UID]) 3613 fl6.flowi6_uid = make_kuid(current_user_ns(), 3614 nla_get_u32(tb[RTA_UID])); 3615 else 3616 fl6.flowi6_uid = iif ? 
INVALID_UID : current_uid(); 3617
3618 if (iif) { 3619 struct net_device *dev; 3620 int flags = 0; 3621
3622 dev = __dev_get_by_index(net, iif); 3623 if (!dev) { 3624 err = -ENODEV; 3625 goto errout; 3626 } 3627
3628 fl6.flowi6_iif = iif; 3629
3630 if (!ipv6_addr_any(&fl6.saddr)) 3631 flags |= RT6_LOOKUP_F_HAS_SADDR; 3632
3633 if (!fibmatch) 3634 dst = ip6_route_input_lookup(net, dev, &fl6, flags); 3635 } else { 3636 fl6.flowi6_oif = oif; 3637
3638 if (!fibmatch) 3639 dst = ip6_route_output(net, NULL, &fl6); 3640 } 3641
3642 if (fibmatch) 3643 dst = ip6_route_lookup(net, &fl6, 0); 3644
3645 rt = container_of(dst, struct rt6_info, dst); 3646 if (rt->dst.error) { 3647 err = rt->dst.error; 3648 ip6_rt_put(rt); 3649 goto errout; 3650 } 3651
3652 if (rt == net->ipv6.ip6_null_entry) { 3653 err = rt->dst.error; 3654 ip6_rt_put(rt); 3655 goto errout; 3656 } 3657
3658 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 3659 if (!skb) { 3660 ip6_rt_put(rt); 3661 err = -ENOBUFS; 3662 goto errout; 3663 } 3664
3665 skb_dst_set(skb, &rt->dst); 3666 if (fibmatch) 3667 err = rt6_fill_node(net, skb, rt, NULL, NULL, iif, 3668 RTM_NEWROUTE, NETLINK_CB(in_skb).portid, 3669 nlh->nlmsg_seq, 0); 3670 else 3671 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, 3672 RTM_NEWROUTE, NETLINK_CB(in_skb).portid, 3673 nlh->nlmsg_seq, 0); 3674 if (err < 0) { 3675 kfree_skb(skb); 3676 goto errout; 3677 } 3678
3679 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 3680 errout: 3681 return err; 3682 } 3683
3684 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, 3685 unsigned int nlm_flags) 3686 { 3687 struct sk_buff *skb; 3688 struct net *net = info->nl_net; 3689 u32 seq; 3690 int err; 3691
3692 err = -ENOBUFS; 3693 seq = info->nlh ? info->nlh->nlmsg_seq : 0; 3694
3695 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); 3696 if (!skb) 3697 goto errout; 3698
3699 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, 3700 event, info->portid, seq, nlm_flags); 3701 if (err < 0) { 3702 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 3703 WARN_ON(err == -EMSGSIZE); 3704 kfree_skb(skb); 3705 goto errout; 3706 } 3707 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, 3708 info->nlh, gfp_any()); 3709 return; 3710 errout: 3711 if (err < 0) 3712 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); 3713 } 3714
3715 static int ip6_route_dev_notify(struct notifier_block *this, 3716 unsigned long event, void *ptr) 3717 { 3718 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3719 struct net *net = dev_net(dev); 3720
3721 if (!(dev->flags & IFF_LOOPBACK)) 3722 return NOTIFY_OK; 3723
3724 if (event == NETDEV_REGISTER) { 3725 net->ipv6.ip6_null_entry->dst.dev = dev; 3726 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); 3727 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 3728 net->ipv6.ip6_prohibit_entry->dst.dev = dev; 3729 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); 3730 net->ipv6.ip6_blk_hole_entry->dst.dev = dev; 3731 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); 3732 #endif 3733 } else if (event == NETDEV_UNREGISTER && 3734 dev->reg_state != NETREG_UNREGISTERED) { 3735 /* NETDEV_UNREGISTER can be fired multiple times by 3736 * netdev_wait_allrefs(). Make sure we only call this once.
3737 */ 3738 in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev); 3739 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 3740 in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev); 3741 in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev); 3742 #endif 3743 } 3744 3745 return NOTIFY_OK; 3746 } 3747 3748 /* 3749 * /proc 3750 */ 3751 3752 #ifdef CONFIG_PROC_FS 3753 3754 static const struct file_operations ipv6_route_proc_fops = { 3755 .owner = THIS_MODULE, 3756 .open = ipv6_route_open, 3757 .read = seq_read, 3758 .llseek = seq_lseek, 3759 .release = seq_release_net, 3760 }; 3761 3762 static int rt6_stats_seq_show(struct seq_file *seq, void *v) 3763 { 3764 struct net *net = (struct net *)seq->private; 3765 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n", 3766 net->ipv6.rt6_stats->fib_nodes, 3767 net->ipv6.rt6_stats->fib_route_nodes, 3768 net->ipv6.rt6_stats->fib_rt_alloc, 3769 net->ipv6.rt6_stats->fib_rt_entries, 3770 net->ipv6.rt6_stats->fib_rt_cache, 3771 dst_entries_get_slow(&net->ipv6.ip6_dst_ops), 3772 net->ipv6.rt6_stats->fib_discarded_routes); 3773 3774 return 0; 3775 } 3776 3777 static int rt6_stats_seq_open(struct inode *inode, struct file *file) 3778 { 3779 return single_open_net(inode, file, rt6_stats_seq_show); 3780 } 3781 3782 static const struct file_operations rt6_stats_seq_fops = { 3783 .owner = THIS_MODULE, 3784 .open = rt6_stats_seq_open, 3785 .read = seq_read, 3786 .llseek = seq_lseek, 3787 .release = single_release_net, 3788 }; 3789 #endif /* CONFIG_PROC_FS */ 3790 3791 #ifdef CONFIG_SYSCTL 3792 3793 static 3794 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write, 3795 void __user *buffer, size_t *lenp, loff_t *ppos) 3796 { 3797 struct net *net; 3798 int delay; 3799 if (!write) 3800 return -EINVAL; 3801 3802 net = (struct net *)ctl->extra1; 3803 delay = net->ipv6.sysctl.flush_delay; 3804 proc_dointvec(ctl, write, buffer, lenp, ppos); 3805 fib6_run_gc(delay <= 0 ? 
0 : (unsigned long)delay, net, delay > 0); 3806 return 0; 3807 } 3808 3809 struct ctl_table ipv6_route_table_template[] = { 3810 { 3811 .procname = "flush", 3812 .data = &init_net.ipv6.sysctl.flush_delay, 3813 .maxlen = sizeof(int), 3814 .mode = 0200, 3815 .proc_handler = ipv6_sysctl_rtcache_flush 3816 }, 3817 { 3818 .procname = "gc_thresh", 3819 .data = &ip6_dst_ops_template.gc_thresh, 3820 .maxlen = sizeof(int), 3821 .mode = 0644, 3822 .proc_handler = proc_dointvec, 3823 }, 3824 { 3825 .procname = "max_size", 3826 .data = &init_net.ipv6.sysctl.ip6_rt_max_size, 3827 .maxlen = sizeof(int), 3828 .mode = 0644, 3829 .proc_handler = proc_dointvec, 3830 }, 3831 { 3832 .procname = "gc_min_interval", 3833 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval, 3834 .maxlen = sizeof(int), 3835 .mode = 0644, 3836 .proc_handler = proc_dointvec_jiffies, 3837 }, 3838 { 3839 .procname = "gc_timeout", 3840 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout, 3841 .maxlen = sizeof(int), 3842 .mode = 0644, 3843 .proc_handler = proc_dointvec_jiffies, 3844 }, 3845 { 3846 .procname = "gc_interval", 3847 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval, 3848 .maxlen = sizeof(int), 3849 .mode = 0644, 3850 .proc_handler = proc_dointvec_jiffies, 3851 }, 3852 { 3853 .procname = "gc_elasticity", 3854 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity, 3855 .maxlen = sizeof(int), 3856 .mode = 0644, 3857 .proc_handler = proc_dointvec, 3858 }, 3859 { 3860 .procname = "mtu_expires", 3861 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires, 3862 .maxlen = sizeof(int), 3863 .mode = 0644, 3864 .proc_handler = proc_dointvec_jiffies, 3865 }, 3866 { 3867 .procname = "min_adv_mss", 3868 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss, 3869 .maxlen = sizeof(int), 3870 .mode = 0644, 3871 .proc_handler = proc_dointvec, 3872 }, 3873 { 3874 .procname = "gc_min_interval_ms", 3875 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval, 3876 .maxlen = sizeof(int), 3877 .mode = 0644, 3878 .proc_handler = proc_dointvec_ms_jiffies, 3879 }, 3880 { } 3881 }; 3882 3883 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net) 3884 { 3885 struct ctl_table *table; 3886 3887 table = kmemdup(ipv6_route_table_template, 3888 sizeof(ipv6_route_table_template), 3889 GFP_KERNEL); 3890 3891 if (table) { 3892 table[0].data = &net->ipv6.sysctl.flush_delay; 3893 table[0].extra1 = net; 3894 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; 3895 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; 3896 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; 3897 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout; 3898 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval; 3899 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity; 3900 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; 3901 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; 3902 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; 3903 3904 /* Don't export sysctls to unprivileged users */ 3905 if (net->user_ns != &init_user_ns) 3906 table[0].procname = NULL; 3907 } 3908 3909 return table; 3910 } 3911 #endif 3912 3913 static int __net_init ip6_route_net_init(struct net *net) 3914 { 3915 int ret = -ENOMEM; 3916 3917 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template, 3918 sizeof(net->ipv6.ip6_dst_ops)); 3919 3920 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0) 3921 goto out_ip6_dst_ops; 3922 3923 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template, 3924 sizeof(*net->ipv6.ip6_null_entry), 3925 GFP_KERNEL); 3926 if (!net->ipv6.ip6_null_entry) 3927 goto 
out_ip6_dst_entries; 3928 net->ipv6.ip6_null_entry->dst.path = 3929 (struct dst_entry *)net->ipv6.ip6_null_entry; 3930 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops; 3931 dst_init_metrics(&net->ipv6.ip6_null_entry->dst, 3932 ip6_template_metrics, true); 3933 3934 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 3935 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, 3936 sizeof(*net->ipv6.ip6_prohibit_entry), 3937 GFP_KERNEL); 3938 if (!net->ipv6.ip6_prohibit_entry) 3939 goto out_ip6_null_entry; 3940 net->ipv6.ip6_prohibit_entry->dst.path = 3941 (struct dst_entry *)net->ipv6.ip6_prohibit_entry; 3942 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops; 3943 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst, 3944 ip6_template_metrics, true); 3945 3946 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, 3947 sizeof(*net->ipv6.ip6_blk_hole_entry), 3948 GFP_KERNEL); 3949 if (!net->ipv6.ip6_blk_hole_entry) 3950 goto out_ip6_prohibit_entry; 3951 net->ipv6.ip6_blk_hole_entry->dst.path = 3952 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry; 3953 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops; 3954 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst, 3955 ip6_template_metrics, true); 3956 #endif 3957 3958 net->ipv6.sysctl.flush_delay = 0; 3959 net->ipv6.sysctl.ip6_rt_max_size = 4096; 3960 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2; 3961 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ; 3962 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ; 3963 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9; 3964 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ; 3965 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40; 3966 3967 net->ipv6.ip6_rt_gc_expire = 30*HZ; 3968 3969 ret = 0; 3970 out: 3971 return ret; 3972 3973 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 3974 out_ip6_prohibit_entry: 3975 kfree(net->ipv6.ip6_prohibit_entry); 3976 out_ip6_null_entry: 3977 kfree(net->ipv6.ip6_null_entry); 3978 #endif 3979 out_ip6_dst_entries: 3980 dst_entries_destroy(&net->ipv6.ip6_dst_ops); 3981 out_ip6_dst_ops: 3982 goto out; 3983 } 3984 3985 static void __net_exit ip6_route_net_exit(struct net *net) 3986 { 3987 kfree(net->ipv6.ip6_null_entry); 3988 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 3989 kfree(net->ipv6.ip6_prohibit_entry); 3990 kfree(net->ipv6.ip6_blk_hole_entry); 3991 #endif 3992 dst_entries_destroy(&net->ipv6.ip6_dst_ops); 3993 } 3994 3995 static int __net_init ip6_route_net_init_late(struct net *net) 3996 { 3997 #ifdef CONFIG_PROC_FS 3998 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops); 3999 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops); 4000 #endif 4001 return 0; 4002 } 4003 4004 static void __net_exit ip6_route_net_exit_late(struct net *net) 4005 { 4006 #ifdef CONFIG_PROC_FS 4007 remove_proc_entry("ipv6_route", net->proc_net); 4008 remove_proc_entry("rt6_stats", net->proc_net); 4009 #endif 4010 } 4011 4012 static struct pernet_operations ip6_route_net_ops = { 4013 .init = ip6_route_net_init, 4014 .exit = ip6_route_net_exit, 4015 }; 4016 4017 static int __net_init ipv6_inetpeer_init(struct net *net) 4018 { 4019 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL); 4020 4021 if (!bp) 4022 return -ENOMEM; 4023 inet_peer_base_init(bp); 4024 net->ipv6.peers = bp; 4025 return 0; 4026 } 4027 4028 static void __net_exit ipv6_inetpeer_exit(struct net *net) 4029 { 4030 struct inet_peer_base *bp = net->ipv6.peers; 4031 4032 net->ipv6.peers = NULL; 4033 inetpeer_invalidate_tree(bp); 4034 kfree(bp); 4035 } 4036 4037 static struct 
pernet_operations ipv6_inetpeer_ops = { 4038 .init = ipv6_inetpeer_init, 4039 .exit = ipv6_inetpeer_exit, 4040 }; 4041
4042 static struct pernet_operations ip6_route_net_late_ops = { 4043 .init = ip6_route_net_init_late, 4044 .exit = ip6_route_net_exit_late, 4045 }; 4046
4047 static struct notifier_block ip6_route_dev_notifier = { 4048 .notifier_call = ip6_route_dev_notify, 4049 .priority = ADDRCONF_NOTIFY_PRIORITY - 10, 4050 }; 4051
4052 void __init ip6_route_init_special_entries(void) 4053 { 4054 /* Registration of the loopback device is done before this portion 4055 * of code runs, so the loopback reference in rt6_info is not taken; 4056 * take it manually for init_net */ 4057 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev; 4058 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 4059 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 4060 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev; 4061 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 4062 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; 4063 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 4064 #endif 4065 } 4066
4067 int __init ip6_route_init(void) 4068 { 4069 int ret; 4070 int cpu; 4071
4072 ret = -ENOMEM; 4073 ip6_dst_ops_template.kmem_cachep = 4074 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, 4075 SLAB_HWCACHE_ALIGN, NULL); 4076 if (!ip6_dst_ops_template.kmem_cachep) 4077 goto out; 4078
4079 ret = dst_entries_init(&ip6_dst_blackhole_ops); 4080 if (ret) 4081 goto out_kmem_cache; 4082
4083 ret = register_pernet_subsys(&ipv6_inetpeer_ops); 4084 if (ret) 4085 goto out_dst_entries; 4086
4087 ret = register_pernet_subsys(&ip6_route_net_ops); 4088 if (ret) 4089 goto out_register_inetpeer; 4090
4091 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; 4092
4093 ret = fib6_init(); 4094 if (ret) 4095 goto out_register_subsys; 4096
4097 ret = xfrm6_init(); 4098 if (ret) 4099 goto out_fib6_init; 4100
4101 ret = fib6_rules_init(); 4102 if (ret) 4103 goto xfrm6_init; 4104
4105 ret = register_pernet_subsys(&ip6_route_net_late_ops); 4106 if (ret) 4107 goto fib6_rules_init; 4108
4109 ret = -ENOBUFS; 4110 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) || 4111 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) || 4112 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL)) 4113 goto out_register_late_subsys; 4114
4115 ret = register_netdevice_notifier(&ip6_route_dev_notifier); 4116 if (ret) 4117 goto out_register_late_subsys; 4118
4119 for_each_possible_cpu(cpu) { 4120 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); 4121
4122 INIT_LIST_HEAD(&ul->head); 4123 spin_lock_init(&ul->lock); 4124 } 4125
4126 out: 4127 return ret; 4128
4129 out_register_late_subsys: 4130 unregister_pernet_subsys(&ip6_route_net_late_ops); 4131 fib6_rules_init: 4132 fib6_rules_cleanup(); 4133 xfrm6_init: 4134 xfrm6_fini(); 4135 out_fib6_init: 4136 fib6_gc_cleanup(); 4137 out_register_subsys: 4138 unregister_pernet_subsys(&ip6_route_net_ops); 4139 out_register_inetpeer: 4140 unregister_pernet_subsys(&ipv6_inetpeer_ops); 4141 out_dst_entries: 4142 dst_entries_destroy(&ip6_dst_blackhole_ops); 4143 out_kmem_cache: 4144 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); 4145 goto out; 4146 } 4147
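/* ip6_route_cleanup() below unwinds everything ip6_route_init()
 * registered: the netdevice notifier, the late pernet ops, fib6
 * rules, xfrm6, the fib6 core, both pernet subsystems, the blackhole
 * dst counters, and finally the dst kmem cache.
 */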
4148 void ip6_route_cleanup(void) 4149 { 4150 unregister_netdevice_notifier(&ip6_route_dev_notifier); 4151 unregister_pernet_subsys(&ip6_route_net_late_ops); 4152 fib6_rules_cleanup(); 4153 xfrm6_fini(); 4154 fib6_gc_cleanup(); 4155 unregister_pernet_subsys(&ipv6_inetpeer_ops); 4156 unregister_pernet_subsys(&ip6_route_net_ops); 4157 dst_entries_destroy(&ip6_dst_blackhole_ops); 4158 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); 4159 } 4160