// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/rtnh.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/ip.h>
#include <linux/uaccess.h>
#include <linux/btf_ids.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS

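/* Neighbour reachability verdicts used when scoring a nexthop.  In
 * short (a summary of how the callers below consume these values):
 * FAIL_HARD excludes the nexthop outright, FAIL_PROBE keeps it but
 * wants a reachability probe, and FAIL_DO_RR keeps it with the lowest
 * valid score and triggers round-robin rotation of the router list.
 */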
enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};

INDIRECT_CALLABLE_SCOPE
struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ip6_mtu(const struct dst_entry *dst);
static void ip6_negative_advice(struct sock *sk,
				struct dst_entry *dst);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
			   struct net_device *dev);
static void ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict);
static size_t rt6_nlmsg_size(struct fib6_info *f6i);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev,
					    unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev);
#endif

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

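/* Uncached rt6_info are not linked into the FIB tree, so they are
 * chained on a per-CPU list instead.  That list is what lets
 * rt6_uncached_list_flush_dev() find and retarget them when their
 * device goes away.
 */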
void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->dst.rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->dst.rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->dst.rt_uncached)) {
		struct uncached_list *ul = rt->dst.rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del_init(&rt->dst.rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

static void rt6_uncached_list_flush_dev(struct net_device *dev)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt, *safe;

		if (list_empty(&ul->head))
			continue;

		spin_lock_bh(&ul->lock);
		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;
			bool handled = false;

			if (rt_idev && rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(blackhole_netdev);
				in6_dev_put(rt_idev);
				handled = true;
			}

			if (rt_dev == dev) {
				rt->dst.dev = blackhole_netdev;
				netdev_ref_replace(rt_dev, blackhole_netdev,
						   &rt->dst.dev_tracker,
						   GFP_ATOMIC);
				handled = true;
			}
			if (handled)
				list_del_init(&rt->dst.rt_uncached);
		}
		spin_unlock_bh(&ul->lock);
	}
}

static inline const void *choose_neigh_daddr(const struct in6_addr *p,
					     struct sk_buff *skb,
					     const void *daddr)
{
	if (!ipv6_addr_any(p))
		return (const void *)p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
{
	struct neighbour *n;

	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
	if (n)
		return n;

	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;
}

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = dst_rt6_info(dst);

	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
				dst_dev(dst), skb, daddr);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rt6_info *rt = dst_rt6_info(dst);
	struct net_device *dev = dst_dev(dst);

	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
	.family			= AF_INET6,
	.gc			= ip6_dst_gc,
	.gc_thresh		= 1024,
	.check			= ip6_dst_check,
	.default_advmss		= ip6_default_advmss,
	.mtu			= ip6_mtu,
	.cow_metrics		= dst_cow_metrics_generic,
	.destroy		= ip6_dst_destroy,
	.ifdown			= ip6_dst_ifdown,
	.negative_advice	= ip6_negative_advice,
	.link_failure		= ip6_link_failure,
	.update_pmtu		= ip6_rt_update_pmtu,
	.redirect		= rt6_do_redirect,
	.local_out		= __ip6_local_out,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.confirm_neigh		= ip6_confirm_neigh,
};

static struct dst_ops ip6_dst_blackhole_ops = {
	.family			= AF_INET6,
	.default_advmss		= ip6_default_advmss,
	.neigh_lookup		= ip6_dst_neigh_lookup,
	.check			= ip6_dst_check,
	.destroy		= ip6_dst_destroy,
	.cow_metrics		= dst_cow_metrics_generic,
	.update_pmtu		= dst_blackhole_update_pmtu,
	.redirect		= dst_blackhole_redirect,
	.mtu			= dst_blackhole_mtu,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol	= RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,
	.fib6_ref	= REFCOUNT_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__rcuref	= RCUREF_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__rcuref	= RCUREF_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__rcuref	= RCUREF_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif

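/* Zero every field that follows the embedded dst_entry: dst_alloc()
 * has already initialized the dst itself, so memset_after() gives the
 * rest of the rt6_info a clean slate without touching it.
 */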
static void rt6_info_init(struct rt6_info *rt)
{
	memset_after(rt, 0, dst);
}

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = dst_rt6_info(dst);
	struct fib6_info *from;
	struct inet6_dev *idev;

	ip_dst_metrics_put(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	from = unrcu_pointer(xchg(&rt->from, NULL));
	fib6_info_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	struct rt6_info *rt = dst_rt6_info(dst);
	struct inet6_dev *idev = rt->rt6i_idev;
	struct fib6_info *from;

	if (idev && idev->dev != blackhole_netdev) {
		struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);

		if (blackhole_idev) {
			rt->rt6i_idev = blackhole_idev;
			in6_dev_put(idev);
		}
	}
	from = unrcu_pointer(xchg(&rt->from, NULL));
	fib6_info_release(from);
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, READ_ONCE(rt->dst.expires));
	return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, READ_ONCE(rt->dst.expires)))
			return true;
	} else if (from) {
		return READ_ONCE(rt->dst.obsolete) != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(from);
	}
	return false;
}

static struct fib6_info *
rt6_multipath_first_sibling_rcu(const struct fib6_info *rt)
{
	struct fib6_info *iter;
	struct fib6_node *fn;

	fn = rcu_dereference(rt->fib6_node);
	if (!fn)
		goto out;
	iter = rcu_dereference(fn->leaf);
	if (!iter)
		goto out;

	while (iter) {
		if (iter->fib6_metric == rt->fib6_metric &&
		    rt6_qualify_for_ecmp(iter))
			return iter;
		iter = rcu_dereference(iter->fib6_next);
	}

out:
	return NULL;
}

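/* Multipath selection.  Each sibling nexthop carries an upper bound in
 * fib_nh_upper_bound; the flow hash (fl6->mp_hash) is compared against
 * those bounds to pick a nexthop, so traffic spreads in proportion to
 * the configured weights (hash-threshold style ECMP).
 */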
void fib6_select_path(const struct net *net, struct fib6_result *res,
		      struct flowi6 *fl6, int oif, bool have_oif_match,
		      const struct sk_buff *skb, int strict)
{
	struct fib6_info *first, *match = res->f6i;
	struct fib6_info *sibling;
	int hash;

	if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
		goto out;

	if (match->nh && have_oif_match && res->nh)
		return;

	if (skb)
		IP6CB(skb)->flags |= IP6SKB_MULTIPATH;

	/* We might have already computed the hash for ICMPv6 errors.  In
	 * such a case it will always be non-zero.  Otherwise now is the
	 * time to do it.
	 */
	if (!fl6->mp_hash &&
	    (!match->nh || nexthop_is_multipath(match->nh)))
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	if (unlikely(match->nh)) {
		nexthop_path_fib6_result(res, fl6->mp_hash);
		return;
	}

	first = rt6_multipath_first_sibling_rcu(match);
	if (!first)
		goto out;

	hash = fl6->mp_hash;
	if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound)) {
		if (rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
				    strict) >= 0)
			match = first;
		goto out;
	}

	list_for_each_entry_rcu(sibling, &first->fib6_siblings,
				fib6_siblings) {
		const struct fib6_nh *nh = sibling->fib6_nh;
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
		if (hash > nh_upper_bound)
			continue;
		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

out:
	res->f6i = match;
	res->nh = match->fib6_nh;
}

/*
 *	Route lookup. rcu_read_lock() should be held.
 */

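/* Does this nexthop satisfy the caller's device constraints?  With an
 * output interface given, the nexthop device must match it; without
 * one, a supplied source address must be configured on the device
 * (strictly so when RT6_LOOKUP_F_IFACE is set).  Dead nexthops never
 * match.
 */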
static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
			       const struct in6_addr *saddr, int oif, int flags)
{
	const struct net_device *dev;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		return false;

	dev = nh->fib_nh_dev;
	if (oif) {
		if (dev->ifindex == oif)
			return true;
	} else {
		if (ipv6_chk_addr(net, saddr, dev,
				  flags & RT6_LOOKUP_F_IFACE))
			return true;
	}

	return false;
}

struct fib6_nh_dm_arg {
	struct net		*net;
	const struct in6_addr	*saddr;
	int			oif;
	int			flags;
	struct fib6_nh		*nh;
};

static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_dm_arg *arg = _arg;

	arg->nh = nh;
	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
				  arg->flags);
}

/* returns fib6_nh from nexthop or NULL */
static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
					struct fib6_result *res,
					const struct in6_addr *saddr,
					int oif, int flags)
{
	struct fib6_nh_dm_arg arg = {
		.net   = net,
		.saddr = saddr,
		.oif   = oif,
		.flags = flags,
	};

	if (nexthop_is_blackhole(nh))
		return NULL;

	if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
		return arg.nh;

	return NULL;
}

static void rt6_device_match(struct net *net, struct fib6_result *res,
			     const struct in6_addr *saddr, int oif, int flags)
{
	struct fib6_info *f6i = res->f6i;
	struct fib6_info *spf6i;
	struct fib6_nh *nh;

	if (!oif && ipv6_addr_any(saddr)) {
		if (unlikely(f6i->nh)) {
			nh = nexthop_fib6_nh(f6i->nh);
			if (nexthop_is_blackhole(f6i->nh))
				goto out_blackhole;
		} else {
			nh = f6i->fib6_nh;
		}
		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
			goto out;
	}

	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
		bool matched = false;

		if (unlikely(spf6i->nh)) {
			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
					      oif, flags);
			if (nh)
				matched = true;
		} else {
			nh = spf6i->fib6_nh;
			if (__rt6_device_match(net, nh, saddr, oif, flags))
				matched = true;
		}
		if (matched) {
			res->f6i = spf6i;
			goto out;
		}
	}

	if (oif && flags & RT6_LOOKUP_F_IFACE) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
		goto out;
	}

	if (unlikely(f6i->nh)) {
		nh = nexthop_fib6_nh(f6i->nh);
		if (nexthop_is_blackhole(f6i->nh))
			goto out_blackhole;
	} else {
		nh = f6i->fib6_nh;
	}

	if (nh->fib_nh_flags & RTNH_F_DEAD) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
	}
out:
	res->nh = nh;
	res->fib6_type = res->f6i->fib6_type;
	res->fib6_flags = res->f6i->fib6_flags;
	return;

out_blackhole:
	res->fib6_flags |= RTF_REJECT;
	res->fib6_type = RTN_BLACKHOLE;
	res->nh = nh;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	netdev_put(work->dev, &work->dev_tracker);
	kfree(work);
}

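/* rt6_probe() runs under RCU, so the neighbour solicitation itself is
 * deferred to a work item (rt6_probe_deferred() above).  The cmpxchg()
 * on last_probe ensures that when several CPUs race, only one of them
 * actually schedules a probe for this interval.
 */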
static void rt6_probe(struct fib6_nh *fib6_nh)
{
	struct __rt6_probe_work *work = NULL;
	const struct in6_addr *nh_gw;
	unsigned long last_probe;
	struct neighbour *neigh;
	struct net_device *dev;
	struct inet6_dev *idev;

	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!fib6_nh->fib_nh_gw_family)
		return;

	nh_gw = &fib6_nh->fib_nh_gw6;
	dev = fib6_nh->fib_nh_dev;
	rcu_read_lock();
	last_probe = READ_ONCE(fib6_nh->last_probe);
	idev = __in6_dev_get(dev);
	if (!idev)
		goto out;
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
	if (neigh) {
		if (READ_ONCE(neigh->nud_state) & NUD_VALID)
			goto out;

		write_lock_bh(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated +
			       READ_ONCE(idev->cnf.rtr_probe_interval))) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock_bh(&neigh->lock);
	} else if (time_after(jiffies, last_probe +
			      READ_ONCE(idev->cnf.rtr_probe_interval))) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (!work || cmpxchg(&fib6_nh->last_probe,
			     last_probe, jiffies) != last_probe) {
		kfree(work);
	} else {
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = *nh_gw;
		netdev_hold(dev, &work->dev_tracker, GFP_ATOMIC);
		work->dev = dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock();
}
#else
static inline void rt6_probe(struct fib6_nh *fib6_nh)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	rcu_read_lock();
	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
					  &fib6_nh->fib_nh_gw6);
	if (neigh) {
		u8 nud_state = READ_ONCE(neigh->nud_state);

		if (nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock();

	return ret;
}

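/* Score a nexthop for route selection.  Roughly: bit 1 is set when the
 * output interface matches (or none was given), the decoded router
 * preference lands in bits 2+ under CONFIG_IPV6_ROUTER_PREF, and a
 * negative rt6_nud_state value is returned instead when a required
 * reachability check fails.  Higher scores win in find_match().
 */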
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict)
{
	int m = 0;

	if (!oif || nh->fib_nh_dev->ifindex == oif)
		m = 2;

	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
		int n = rt6_check_neigh(nh);

		if (n < 0)
			return n;
	}
	return m;
}

static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
		       int oif, int strict, int *mpri, bool *do_rr)
{
	bool match_do_rr = false;
	bool rc = false;
	int m;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		goto out;

	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	m = rt6_score_route(nh, fib6_flags, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(nh);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		rc = true;
	}
out:
	return rc;
}

struct fib6_nh_frl_arg {
	u32	flags;
	int	oif;
	int	strict;
	int	*mpri;
	bool	*do_rr;
	struct fib6_nh *nh;
};

static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_frl_arg *arg = _arg;

	arg->nh = nh;
	return find_match(nh, arg->flags, arg->oif, arg->strict,
			  arg->mpri, arg->do_rr);
}

static void __find_rr_leaf(struct fib6_info *f6i_start,
			   struct fib6_info *nomatch, u32 metric,
			   struct fib6_result *res, struct fib6_info **cont,
			   int oif, int strict, bool *do_rr, int *mpri)
{
	struct fib6_info *f6i;

	for (f6i = f6i_start;
	     f6i && f6i != nomatch;
	     f6i = rcu_dereference(f6i->fib6_next)) {
		bool matched = false;
		struct fib6_nh *nh;

		if (cont && f6i->fib6_metric != metric) {
			*cont = f6i;
			return;
		}

		if (fib6_check_expired(f6i))
			continue;

		if (unlikely(f6i->nh)) {
			struct fib6_nh_frl_arg arg = {
				.flags	= f6i->fib6_flags,
				.oif	= oif,
				.strict	= strict,
				.mpri	= mpri,
				.do_rr	= do_rr
			};

			if (nexthop_is_blackhole(f6i->nh)) {
				res->fib6_flags = RTF_REJECT;
				res->fib6_type = RTN_BLACKHOLE;
				res->f6i = f6i;
				res->nh = nexthop_fib6_nh(f6i->nh);
				return;
			}
			if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
						     &arg)) {
				matched = true;
				nh = arg.nh;
			}
		} else {
			nh = f6i->fib6_nh;
			if (find_match(nh, f6i->fib6_flags, oif, strict,
				       mpri, do_rr))
				matched = true;
		}
		if (matched) {
			res->f6i = f6i;
			res->nh = nh;
			res->fib6_flags = f6i->fib6_flags;
			res->fib6_type = f6i->fib6_type;
		}
	}
}

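/* The round-robin pointer rr_head may sit in the middle of the leaf
 * list, so the search runs in two passes: from rr_head to the end of
 * the same-metric run, then from the leaf up to rr_head.  Entries with
 * a different metric are remembered in 'cont' and only scanned when
 * nothing matched at the preferred metric.
 */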
static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
			 struct fib6_info *rr_head, int oif, int strict,
			 bool *do_rr, struct fib6_result *res)
{
	u32 metric = rr_head->fib6_metric;
	struct fib6_info *cont = NULL;
	int mpri = -1;

	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	if (res->f6i || !cont)
		return;

	__find_rr_leaf(cont, NULL, metric, res, NULL,
		       oif, strict, do_rr, &mpri);
}

static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
		       struct fib6_result *res, int strict)
{
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *rt0;
	bool do_rr = false;
	int key_plen;

	/* make sure this function or its helpers sets f6i */
	res->f6i = NULL;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		goto out;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not point to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		goto out;

	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
	if (do_rr) {
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->fib6_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
		}
	}

out:
	if (!res->f6i) {
		res->f6i = net->ipv6.fib6_null_entry;
		res->nh = res->f6i->fib6_nh;
		res->fib6_flags = res->f6i->fib6_flags;
		res->fib6_type = res->f6i->fib6_type;
	}
}

static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
{
	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
	       res->nh->fib_nh_gw_family;
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *)opt;
	struct in6_addr prefix_buf, *prefix;
	struct fib6_table *table;
	unsigned int pref;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info))
		return -EINVAL;

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3)
		return -EINVAL;
	else if (rinfo->prefix_len > 128)
		return -EINVAL;
	else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2)
			return -EINVAL;
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1)
			return -EINVAL;
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(net, rt, false);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		table = rt->fib6_table;
		spin_lock_bh(&table->tb6_lock);

		if (!addrconf_finite_timeout(lifetime)) {
			fib6_clean_expires(rt);
			fib6_remove_gc_list(rt);
		} else {
			fib6_set_expires(rt, jiffies + HZ * lifetime);
			fib6_add_gc_list(rt);
		}

		spin_unlock_bh(&table->tb6_lock);

		fib6_info_release(rt);
	}
	return 0;
}
#endif

/*
 *	Misc support functions
 */

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;

	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}

static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}

static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;

	return flags;
}

static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
{
	rt->dst.error = ip6_rt_type_to_error(fib6_type);

	switch (fib6_type) {
	case RTN_BLACKHOLE:
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}

static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;

	if (res->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, res->fib6_type);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
		rt->dst.output = ip6_mr_output;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (res->nh->fib_nh_lws) {
		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}

/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rcu_assign_pointer(rt->from, from);
	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}

/* Caller must already hold reference to f6i in result */
static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	const struct net_device *dev = nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;

	ip6_rt_init_dst(rt, res);

	rt->rt6i_dst = f6i->fib6_dst;
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
	rt->rt6i_flags = res->fib6_flags;
	if (nh->fib_nh_gw_family) {
		rt->rt6i_gateway = nh->fib_nh_gw6;
		rt->rt6i_flags |= RTF_GATEWAY;
	}
	rt6_set_from(rt, f6i);
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = f6i->fib6_src;
#endif
}

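/* Walk back up the tree after a failed lookup: climb to the parent,
 * descend into the parent's source-address subtree when one exists,
 * and return the first node that actually carries routes (RTN_RTINFO).
 * Returns NULL once the top-level root is reached.
 */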
static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;

	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
			fn = fib6_node_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (net) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}

/* called with rcu_lock held */
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;
	unsigned short flags;
	struct rt6_info *nrt;

	if (!fib6_info_hold_safe(f6i))
		goto fallback;

	flags = fib6_info_dst_flags(f6i);
	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
	if (!nrt) {
		fib6_info_release(f6i);
		goto fallback;
	}

	ip6_rt_copy_init(nrt, res);
	return nrt;

fallback:
	nrt = dev_net(dev)->ipv6.ip6_null_entry;
	dst_hold(&nrt->dst);
	return nrt;
}

INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct fib6_result res = {};
	struct fib6_node *fn;
	struct rt6_info *rt;

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	res.f6i = rcu_dereference(fn->leaf);
	if (!res.f6i)
		res.f6i = net->ipv6.fib6_null_entry;
	else
		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
				 flags);

	if (res.f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;

		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
		goto out;
	} else if (res.fib6_flags & RTF_REJECT) {
		goto do_create;
	}

	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
			 fl6->flowi6_oif != 0, skb, flags);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt))
			dst_use_noref(&rt->dst, jiffies);
	} else {
do_create:
		rt = ip6_create_rt_rcu(&res);
	}

out:
	trace_fib6_table_lookup(net, &res, table, fl6);

	rcu_read_unlock();

	return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return dst_rt6_info(dst);

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason,
 * the route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
	struct nl_info info = { .nl_net = net, };

	return __ip6_ins_rt(rt, &info, NULL);
}

static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct fib6_info *f6i = res->f6i;
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	dev = ip6_rt_get_dev_rcu(res);
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
	if (!rt) {
		fib6_info_release(f6i);
		return NULL;
	}

	ip6_rt_copy_init(rt, res);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(res)) {
		if (f6i->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;
	unsigned short flags = fib6_info_dst_flags(f6i);
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(res);
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);
	rcu_read_unlock();
	if (!pcpu_rt) {
		fib6_info_release(f6i);
		return NULL;
	}
	ip6_rt_copy_init(pcpu_rt, res);
	pcpu_rt->rt6i_flags |= RTF_PCPU;

	if (f6i->nh)
		pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));

	return pcpu_rt;
}

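/* Per-cpu routes built from a nexthop object record the namespace's
 * IPv6 route generation id (see ip6_rt_pcpu_alloc() above), so nexthop
 * updates, which bump the genid, invalidate them lazily via this check.
 */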
static bool rt6_is_valid(const struct rt6_info *rt6)
{
	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt;

	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);

	if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
		struct rt6_info *prev, **p;

		p = this_cpu_ptr(res->nh->rt6i_pcpu);
		/* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
		prev = xchg(p, NULL);
		if (prev) {
			dst_dev_put(&prev->dst);
			dst_release(&prev->dst);
		}

		pcpu_rt = NULL;
	}

	return pcpu_rt;
}

static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(res);
	if (!pcpu_rt)
		return NULL;

	p = this_cpu_ptr(res->nh->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	if (unlikely(prev)) {
		/*
		 * Another task on this CPU already installed a pcpu_rt.
		 * This can happen on PREEMPT_RT where preemption is possible.
		 * Free our allocation and return the existing one.
		 */
		WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RT));

		dst_dev_put(&pcpu_rt->dst);
		dst_release(&pcpu_rt->dst);
		return prev;
	}

	if (res->f6i->fib6_destroying) {
		struct fib6_info *from;

		from = unrcu_pointer(xchg(&pcpu_rt->from, NULL));
		fib6_info_release(from);
	}

	return pcpu_rt;
}

/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* purge the exception completely to allow releasing the held
	 * resources: some [sk] cache may keep the dst around for an
	 * unlimited time
	 */
	dst_dev_put(&rt6_ex->rt6i->dst);

	hlist_del_rcu(&rt6_ex->hlist);
	dst_release(&rt6_ex->rt6i->dst);
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *rt6_ex, *oldest = NULL;

	if (!bucket)
		return;

	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	rt6_remove_exception(bucket, oldest);
}

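/* Hash (daddr, saddr) into an exception bucket index.  The key is a
 * once-per-boot random siphash key, which keeps remote hosts from
 * steering exceptions into a single bucket (hash flooding).
 */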
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static siphash_aligned_key_t rt6_exception_key;
	struct {
		struct in6_addr dst;
		struct in6_addr src;
	} __aligned(SIPHASH_ALIGNMENT) combined = {
		.dst = *dst,
	};
	u64 val;

	net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		combined.src = *src;
#endif
	val = siphash(&combined, sizeof(combined), &rt6_exception_key);

	return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

static unsigned int fib6_mtu(const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	unsigned int mtu;

	if (res->f6i->fib6_pmtu) {
		mtu = res->f6i->fib6_pmtu;
	} else {
		struct net_device *dev = nh->fib_nh_dev;
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		mtu = READ_ONCE(idev->cnf.mtu6);
		rcu_read_unlock();
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}

#define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL

/* used when the flushed bit is not relevant, only access to the bucket
 * (i.e., all bucket users except rt6_insert_exception);
 *
 * called under rcu lock; sometimes called with rt6_exception_lock held
 */
static
struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
						       spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;

	if (lock)
		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
						   lockdep_is_held(lock));
	else
		bucket = rcu_dereference(nh->rt6i_exception_bucket);

	/* remove bucket flushed bit if set */
	if (bucket) {
		unsigned long p = (unsigned long)bucket;

		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
		bucket = (struct rt6_exception_bucket *)p;
	}

	return bucket;
}

static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
{
	unsigned long p = (unsigned long)bucket;

	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
}

/* called with rt6_exception_lock held */
static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
					      spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;
	unsigned long p;

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(lock));

	p = (unsigned long)bucket;
	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
	bucket = (struct rt6_exception_bucket *)p;
	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
}

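/* Insert a cached route (an exception, typically created by a PMTU or
 * redirect event) for nrt under the nexthop of res.  The bucket array
 * is allocated lazily, an existing entry for the same (daddr, saddr)
 * key is replaced, bucket depth is capped at a randomized maximum, and
 * on success the table sernum is bumped to invalidate stale cached dst.
 */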
static int rt6_insert_exception(struct rt6_info *nrt,
				const struct fib6_result *res)
{
	struct net *net = dev_net(nrt->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct fib6_info *f6i = res->f6i;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_nh *nh = res->nh;
	int max_depth;
	int err = 0;

	spin_lock_bh(&rt6_exception_lock);

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
		err = -EINVAL;
		goto out;
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only fib6_dst.
	 */
	if (f6i->fib6_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif
	/* rt6_mtu_change() might lower mtu on f6i.
	 * Only insert this exception route if its mtu
	 * is less than f6i's mtu value.
	 */
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
		err = -EINVAL;
		goto out;
	}

	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
	net->ipv6.rt6_stats->fib_rt_cache++;

	/* Randomize max depth to avoid some side-channel attacks. */
	max_depth = FIB6_MAX_DEPTH + get_random_u32_below(FIB6_MAX_DEPTH);
	while (bucket->depth > max_depth)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	if (!err) {
		spin_lock_bh(&f6i->fib6_table->tb6_lock);
		fib6_update_sernum(net, f6i);
		fib6_add_gc_list(f6i);
		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
		fib6_force_start_gc(net);
	}

	return err;
}

static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	spin_lock_bh(&rt6_exception_lock);

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (!bucket)
		goto out;

	/* Prevent rt6_insert_exception() from recreating the bucket list */
	if (!from)
		fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
			if (!from ||
			    rcu_access_pointer(rt6_ex->rt6i->from) == from)
				rt6_remove_exception(bucket, rt6_ex);
		}
		WARN_ON_ONCE(!from && bucket->depth);
		bucket++;
	}
out:
	spin_unlock_bh(&rt6_exception_lock);
}

static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
{
	struct fib6_info *f6i = arg;

	fib6_nh_flush_exceptions(nh, f6i);

	return 0;
}

void rt6_flush_exceptions(struct fib6_info *f6i)
{
	if (f6i->nh) {
		rcu_read_lock();
		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions, f6i);
		rcu_read_unlock();
	} else {
		fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
	}
}

/* Find cached rt in the hash table inside the passed-in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct rt6_info *ret = NULL;

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * However, the src addr used to create the hash
	 * might not be exactly the passed-in saddr which
	 * is a /128 addr from the flow.
	 * So we need to use f6i->fib6_src to redo the lookup
	 * if the passed-in saddr does not find anything.
	 * (See the logic in ip6_rt_cache_alloc() on how
	 * rt->rt6i_src is updated.)
	 */
	if (res->f6i->fib6_src.plen)
		src_key = saddr;
find_ex:
#endif
	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		ret = rt6_ex->rt6i;

#ifdef CONFIG_IPV6_SUBTREES
	/* Use fib6_src as src_key and redo lookup */
	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
		src_key = &res->f6i->fib6_src.addr;
		goto find_ex;
	}
#endif

	return ret;
}
1887
1888 /* Remove the passed in cached rt from the hash table that contains it */
fib6_nh_remove_exception(const struct fib6_nh * nh,int plen,const struct rt6_info * rt)1889 static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1890 const struct rt6_info *rt)
1891 {
1892 const struct in6_addr *src_key = NULL;
1893 struct rt6_exception_bucket *bucket;
1894 struct rt6_exception *rt6_ex;
1895 int err;
1896
1897 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1898 return -ENOENT;
1899
1900 spin_lock_bh(&rt6_exception_lock);
1901 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1902
1903 #ifdef CONFIG_IPV6_SUBTREES
1904 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1905 * and exception table is indexed by a hash of
1906 * both rt6i_dst and rt6i_src.
1907 * Otherwise, the exception table is indexed by
1908 * a hash of only rt6i_dst.
1909 */
1910 if (plen)
1911 src_key = &rt->rt6i_src.addr;
1912 #endif
1913 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1914 &rt->rt6i_dst.addr,
1915 src_key);
1916 if (rt6_ex) {
1917 rt6_remove_exception(bucket, rt6_ex);
1918 err = 0;
1919 } else {
1920 err = -ENOENT;
1921 }
1922
1923 spin_unlock_bh(&rt6_exception_lock);
1924 return err;
1925 }
1926
1927 struct fib6_nh_excptn_arg {
1928 struct rt6_info *rt;
1929 int plen;
1930 };
1931
1932 static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1933 {
1934 struct fib6_nh_excptn_arg *arg = _arg;
1935 int err;
1936
1937 err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1938 if (err == 0)
1939 return 1;
1940
1941 return 0;
1942 }
1943
1944 static int rt6_remove_exception_rt(struct rt6_info *rt)
1945 {
1946 struct fib6_info *from;
1947
1948 from = rcu_dereference(rt->from);
1949 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1950 return -EINVAL;
1951
1952 if (from->nh) {
1953 struct fib6_nh_excptn_arg arg = {
1954 .rt = rt,
1955 .plen = from->fib6_src.plen
1956 };
1957 int rc;
1958
1959 /* rc = 1 means an entry was found */
1960 rc = nexthop_for_each_fib6_nh(from->nh,
1961 rt6_nh_remove_exception_rt,
1962 &arg);
1963 return rc ? 0 : -ENOENT;
1964 }
1965
1966 return fib6_nh_remove_exception(from->fib6_nh,
1967 from->fib6_src.plen, rt);
1968 }
1969
1970 /* Find rt6_ex which contains the passed in rt cache and
1971 * refresh its stamp
1972 */
1973 static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1974 const struct rt6_info *rt)
1975 {
1976 const struct in6_addr *src_key = NULL;
1977 struct rt6_exception_bucket *bucket;
1978 struct rt6_exception *rt6_ex;
1979
1980 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1981 #ifdef CONFIG_IPV6_SUBTREES
1982 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1983 * and exception table is indexed by a hash of
1984 * both rt6i_dst and rt6i_src.
1985 * Otherwise, the exception table is indexed by
1986 * a hash of only rt6i_dst.
1987 */
1988 if (plen)
1989 src_key = &rt->rt6i_src.addr;
1990 #endif
1991 rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1992 if (rt6_ex)
1993 rt6_ex->stamp = jiffies;
1994 }
1995
1996 struct fib6_nh_match_arg {
1997 const struct net_device *dev;
1998 const struct in6_addr *gw;
1999 struct fib6_nh *match;
2000 };
2001
2002 /* determine if fib6_nh has given device and gateway */
2003 static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
2004 {
2005 struct fib6_nh_match_arg *arg = _arg;
2006
2007 if (arg->dev != nh->fib_nh_dev ||
2008 (arg->gw && !nh->fib_nh_gw_family) ||
2009 (!arg->gw && nh->fib_nh_gw_family) ||
2010 (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
2011 return 0;
2012
2013 arg->match = nh;
2014
2015 /* found a match, break the loop */
2016 return 1;
2017 }
2018
2019 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
2020 {
2021 struct fib6_info *from;
2022 struct fib6_nh *fib6_nh;
2023
2024 rcu_read_lock();
2025
2026 from = rcu_dereference(rt->from);
2027 if (!from || !(rt->rt6i_flags & RTF_CACHE))
2028 goto unlock;
2029
2030 if (from->nh) {
2031 struct fib6_nh_match_arg arg = {
2032 .dev = rt->dst.dev,
2033 .gw = &rt->rt6i_gateway,
2034 };
2035
2036 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
2037
2038 if (!arg.match)
2039 goto unlock;
2040 fib6_nh = arg.match;
2041 } else {
2042 fib6_nh = from->fib6_nh;
2043 }
2044 fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
2045 unlock:
2046 rcu_read_unlock();
2047 }
2048
2049 static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
2050 struct rt6_info *rt, int mtu)
2051 {
2052 /* If the new MTU is lower than the route PMTU, this new MTU will be the
2053 * lowest MTU in the path: always allow updating the route PMTU to
2054 * reflect PMTU decreases.
2055 *
2056 * If the new MTU is higher, and the route PMTU is equal to the local
2057 * MTU, this means the old MTU is the lowest in the path, so allow
2058 * updating it: if other nodes now have lower MTUs, PMTU discovery will
2059 * handle this.
2060 */
2061
2062 if (dst_mtu(&rt->dst) >= mtu)
2063 return true;
2064
2065 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
2066 return true;
2067
2068 return false;
2069 }
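
/* Worked example (hypothetical MTUs): a route whose PMTU is 1500 on a
 * link being raised to 9000 is updated, since dst_mtu() equals the old
 * link MTU and the local link was therefore the path bottleneck. A
 * route whose PMTU was lowered to 1280 by a remote hop is left alone
 * when the local MTU grows; it may only shrink further, and PMTU
 * discovery will raise it again if the remote bottleneck goes away.
 */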
2070
2071 static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
2072 const struct fib6_nh *nh, int mtu)
2073 {
2074 struct rt6_exception_bucket *bucket;
2075 struct rt6_exception *rt6_ex;
2076 int i;
2077
2078 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2079 if (!bucket)
2080 return;
2081
2082 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2083 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
2084 struct rt6_info *entry = rt6_ex->rt6i;
2085
2086 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
2087 * route), the metrics of its rt->from have already
2088 * been updated.
2089 */
2090 if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
2091 rt6_mtu_change_route_allowed(idev, entry, mtu))
2092 dst_metric_set(&entry->dst, RTAX_MTU, mtu);
2093 }
2094 bucket++;
2095 }
2096 }
2097
2098 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2099
2100 static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
2101 const struct in6_addr *gateway)
2102 {
2103 struct rt6_exception_bucket *bucket;
2104 struct rt6_exception *rt6_ex;
2105 struct hlist_node *tmp;
2106 int i;
2107
2108 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2109 return;
2110
2111 spin_lock_bh(&rt6_exception_lock);
2112 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2113 if (bucket) {
2114 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2115 hlist_for_each_entry_safe(rt6_ex, tmp,
2116 &bucket->chain, hlist) {
2117 struct rt6_info *entry = rt6_ex->rt6i;
2118
2119 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
2120 RTF_CACHE_GATEWAY &&
2121 ipv6_addr_equal(gateway,
2122 &entry->rt6i_gateway)) {
2123 rt6_remove_exception(bucket, rt6_ex);
2124 }
2125 }
2126 bucket++;
2127 }
2128 }
2129
2130 spin_unlock_bh(&rt6_exception_lock);
2131 }
2132
2133 static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
2134 struct rt6_exception *rt6_ex,
2135 struct fib6_gc_args *gc_args,
2136 unsigned long now)
2137 {
2138 struct rt6_info *rt = rt6_ex->rt6i;
2139
2140 /* we are pruning and obsoleting aged-out and non-gateway exceptions
2141 * even if other objects still hold references to them, so that on the
2142 * next dst_check() such references can be dropped.
2143 * RTF_EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
2144 * expired, independently of their aging, as per RFC 8201 section 4
2145 */
2146 if (!(rt->rt6i_flags & RTF_EXPIRES)) {
2147 if (time_after_eq(now, READ_ONCE(rt->dst.lastuse) +
2148 gc_args->timeout)) {
2149 pr_debug("aging clone %p\n", rt);
2150 rt6_remove_exception(bucket, rt6_ex);
2151 return;
2152 }
2153 } else if (time_after(jiffies, READ_ONCE(rt->dst.expires))) {
2154 pr_debug("purging expired route %p\n", rt);
2155 rt6_remove_exception(bucket, rt6_ex);
2156 return;
2157 }
2158
2159 if (rt->rt6i_flags & RTF_GATEWAY) {
2160 struct neighbour *neigh;
2161
2162 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2163
2164 if (!(neigh && (neigh->flags & NTF_ROUTER))) {
2165 pr_debug("purging route %p via non-router but gateway\n",
2166 rt);
2167 rt6_remove_exception(bucket, rt6_ex);
2168 return;
2169 }
2170 }
2171
2172 gc_args->more++;
2173 }
2174
2175 static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2176 struct fib6_gc_args *gc_args,
2177 unsigned long now)
2178 {
2179 struct rt6_exception_bucket *bucket;
2180 struct rt6_exception *rt6_ex;
2181 struct hlist_node *tmp;
2182 int i;
2183
2184 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2185 return;
2186
2187 rcu_read_lock_bh();
2188 spin_lock(&rt6_exception_lock);
2189 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2190 if (bucket) {
2191 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2192 hlist_for_each_entry_safe(rt6_ex, tmp,
2193 &bucket->chain, hlist) {
2194 rt6_age_examine_exception(bucket, rt6_ex,
2195 gc_args, now);
2196 }
2197 bucket++;
2198 }
2199 }
2200 spin_unlock(&rt6_exception_lock);
2201 rcu_read_unlock_bh();
2202 }
2203
2204 struct fib6_nh_age_excptn_arg {
2205 struct fib6_gc_args *gc_args;
2206 unsigned long now;
2207 };
2208
2209 static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2210 {
2211 struct fib6_nh_age_excptn_arg *arg = _arg;
2212
2213 fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2214 return 0;
2215 }
2216
2217 void rt6_age_exceptions(struct fib6_info *f6i,
2218 struct fib6_gc_args *gc_args,
2219 unsigned long now)
2220 {
2221 if (f6i->nh) {
2222 struct fib6_nh_age_excptn_arg arg = {
2223 .gc_args = gc_args,
2224 .now = now
2225 };
2226
2227 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
2228 &arg);
2229 } else {
2230 fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
2231 }
2232 }
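
/* rt6_age_exceptions() is the exception-table half of the fib6 garbage
 * collector: fib6_run_gc() walks every fib6_info (via fib6_age() in
 * ip6_fib.c) and lands here, pruning aged-out clones, expired PMTU
 * entries, and redirect clones whose gateway lost its NTF_ROUTER
 * neighbour flag, as examined above.
 */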
2233
2234 /* must be called with rcu lock held */
2235 int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
2236 struct flowi6 *fl6, struct fib6_result *res, int strict)
2237 {
2238 struct fib6_node *fn, *saved_fn;
2239
2240 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2241 saved_fn = fn;
2242
2243 redo_rt6_select:
2244 rt6_select(net, fn, oif, res, strict);
2245 if (res->f6i == net->ipv6.fib6_null_entry) {
2246 fn = fib6_backtrack(fn, &fl6->saddr);
2247 if (fn)
2248 goto redo_rt6_select;
2249 else if (strict & RT6_LOOKUP_F_REACHABLE) {
2250 /* also consider unreachable route */
2251 strict &= ~RT6_LOOKUP_F_REACHABLE;
2252 fn = saved_fn;
2253 goto redo_rt6_select;
2254 }
2255 }
2256
2257 trace_fib6_table_lookup(net, res, table, fl6);
2258
2259 return 0;
2260 }
2261
2262 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
2263 int oif, struct flowi6 *fl6,
2264 const struct sk_buff *skb, int flags)
2265 {
2266 struct fib6_result res = {};
2267 struct rt6_info *rt = NULL;
2268 int strict = 0;
2269
2270 WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
2271 !rcu_read_lock_held());
2272
2273 strict |= flags & RT6_LOOKUP_F_IFACE;
2274 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
2275 if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
2276 strict |= RT6_LOOKUP_F_REACHABLE;
2277
2278 rcu_read_lock();
2279
2280 fib6_table_lookup(net, table, oif, fl6, &res, strict);
2281 if (res.f6i == net->ipv6.fib6_null_entry)
2282 goto out;
2283
2284 fib6_select_path(net, &res, fl6, oif, false, skb, strict);
2285
2286 /* Search through the exception table */
2287 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
2288 if (rt) {
2289 goto out;
2290 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
2291 !res.nh->fib_nh_gw_family)) {
2292 /* Create an RTF_CACHE clone which will not be
2293 * owned by the fib6 tree. It is for the special case where
2294 * the daddr in the skb during the neighbor lookup is different
2295 * from the fl6->daddr used to look up the route here.
2296 */
2297 rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
2298
2299 if (rt) {
2300 /* 1 refcnt is taken during ip6_rt_cache_alloc().
2301 * As rt6_uncached_list_add() does not consume refcnt,
2302 * this refcnt is always returned to the caller even
2303 * if the caller sets the RT6_LOOKUP_F_DST_NOREF flag.
2304 */
2305 rt6_uncached_list_add(rt);
2306 rcu_read_unlock();
2307
2308 return rt;
2309 }
2310 } else {
2311 /* Get a percpu copy */
2312 local_bh_disable();
2313 rt = rt6_get_pcpu_route(&res);
2314
2315 if (!rt)
2316 rt = rt6_make_pcpu_route(net, &res);
2317
2318 local_bh_enable();
2319 }
2320 out:
2321 if (!rt)
2322 rt = net->ipv6.ip6_null_entry;
2323 if (!(flags & RT6_LOOKUP_F_DST_NOREF))
2324 ip6_hold_safe(net, &rt);
2325 rcu_read_unlock();
2326
2327 return rt;
2328 }
2329 EXPORT_SYMBOL_GPL(ip6_pol_route);
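
/* In summary, ip6_pol_route() can hand back, in order of preference: a
 * cached RTF_CACHE exception (e.g. a PMTU or redirect clone), a fresh
 * uncached RTF_CACHE clone for the FLOWI_FLAG_KNOWN_NH corner case, or
 * the per-cpu copy of the matched fib entry; with no usable match it
 * falls back to ip6_null_entry.
 */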
2330
2331 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net,
2332 struct fib6_table *table,
2333 struct flowi6 *fl6,
2334 const struct sk_buff *skb,
2335 int flags)
2336 {
2337 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2338 }
2339
2340 struct dst_entry *ip6_route_input_lookup(struct net *net,
2341 struct net_device *dev,
2342 struct flowi6 *fl6,
2343 const struct sk_buff *skb,
2344 int flags)
2345 {
2346 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2347 flags |= RT6_LOOKUP_F_IFACE;
2348
2349 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2350 }
2351 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2352
2353 static void ip6_multipath_l3_keys(const struct sk_buff *skb,
2354 struct flow_keys *keys,
2355 struct flow_keys *flkeys)
2356 {
2357 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
2358 const struct ipv6hdr *key_iph = outer_iph;
2359 struct flow_keys *_flkeys = flkeys;
2360 const struct ipv6hdr *inner_iph;
2361 const struct icmp6hdr *icmph;
2362 struct ipv6hdr _inner_iph;
2363 struct icmp6hdr _icmph;
2364
2365 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
2366 goto out;
2367
2368 icmph = skb_header_pointer(skb, skb_transport_offset(skb),
2369 sizeof(_icmph), &_icmph);
2370 if (!icmph)
2371 goto out;
2372
2373 if (!icmpv6_is_err(icmph->icmp6_type))
2374 goto out;
2375
2376 inner_iph = skb_header_pointer(skb,
2377 skb_transport_offset(skb) + sizeof(*icmph),
2378 sizeof(_inner_iph), &_inner_iph);
2379 if (!inner_iph)
2380 goto out;
2381
2382 key_iph = inner_iph;
2383 _flkeys = NULL;
2384 out:
2385 if (_flkeys) {
2386 keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
2387 keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
2388 keys->tags.flow_label = _flkeys->tags.flow_label;
2389 keys->basic.ip_proto = _flkeys->basic.ip_proto;
2390 } else {
2391 keys->addrs.v6addrs.src = key_iph->saddr;
2392 keys->addrs.v6addrs.dst = key_iph->daddr;
2393 keys->tags.flow_label = ip6_flowlabel(key_iph);
2394 keys->basic.ip_proto = key_iph->nexthdr;
2395 }
2396 }
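
/* Example: an ICMPv6 "packet too big" quoting an offending TCP packet
 * passes the icmpv6_is_err() check above, so the keys are pulled from
 * the inner (quoted) header rather than the ICMPv6 wrapper, and any
 * pre-dissected flkeys are discarded. The error therefore hashes onto
 * the same multipath leg as the flow that triggered it.
 */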
2397
2398 static u32 rt6_multipath_custom_hash_outer(const struct net *net,
2399 const struct sk_buff *skb,
2400 bool *p_has_inner)
2401 {
2402 u32 hash_fields = ip6_multipath_hash_fields(net);
2403 struct flow_keys keys, hash_keys;
2404
2405 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2406 return 0;
2407
2408 memset(&hash_keys, 0, sizeof(hash_keys));
2409 skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
2410
2411 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2412 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2413 hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2414 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2415 hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2416 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2417 hash_keys.basic.ip_proto = keys.basic.ip_proto;
2418 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
2419 hash_keys.tags.flow_label = keys.tags.flow_label;
2420 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2421 hash_keys.ports.src = keys.ports.src;
2422 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2423 hash_keys.ports.dst = keys.ports.dst;
2424
2425 *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
2426 return fib_multipath_hash_from_keys(net, &hash_keys);
2427 }
2428
2429 static u32 rt6_multipath_custom_hash_inner(const struct net *net,
2430 const struct sk_buff *skb,
2431 bool has_inner)
2432 {
2433 u32 hash_fields = ip6_multipath_hash_fields(net);
2434 struct flow_keys keys, hash_keys;
2435
2436 /* We assume the packet carries an encapsulation, but if none was
2437 * encountered during dissection of the outer flow, then there is no
2438 * point in calling the flow dissector again.
2439 */
2440 if (!has_inner)
2441 return 0;
2442
2443 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
2444 return 0;
2445
2446 memset(&hash_keys, 0, sizeof(hash_keys));
2447 skb_flow_dissect_flow_keys(skb, &keys, 0);
2448
2449 if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
2450 return 0;
2451
2452 if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2453 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2454 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
2455 hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2456 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
2457 hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2458 } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2459 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2460 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
2461 hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2462 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
2463 hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2464 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
2465 hash_keys.tags.flow_label = keys.tags.flow_label;
2466 }
2467
2468 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
2469 hash_keys.basic.ip_proto = keys.basic.ip_proto;
2470 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
2471 hash_keys.ports.src = keys.ports.src;
2472 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
2473 hash_keys.ports.dst = keys.ports.dst;
2474
2475 return fib_multipath_hash_from_keys(net, &hash_keys);
2476 }
2477
2478 static u32 rt6_multipath_custom_hash_skb(const struct net *net,
2479 const struct sk_buff *skb)
2480 {
2481 u32 mhash, mhash_inner;
2482 bool has_inner = true;
2483
2484 mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner);
2485 mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner);
2486
2487 return jhash_2words(mhash, mhash_inner, 0);
2488 }
2489
2490 static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
2491 const struct flowi6 *fl6)
2492 {
2493 u32 hash_fields = ip6_multipath_hash_fields(net);
2494 struct flow_keys hash_keys;
2495
2496 if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2497 return 0;
2498
2499 memset(&hash_keys, 0, sizeof(hash_keys));
2500 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2501 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2502 hash_keys.addrs.v6addrs.src = fl6->saddr;
2503 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2504 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2505 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2506 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2507 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
2508 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2509 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) {
2510 if (fl6->flowi6_flags & FLOWI_FLAG_ANY_SPORT)
2511 hash_keys.ports.src = (__force __be16)get_random_u16();
2512 else
2513 hash_keys.ports.src = fl6->fl6_sport;
2514 }
2515 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2516 hash_keys.ports.dst = fl6->fl6_dport;
2517
2518 return fib_multipath_hash_from_keys(net, &hash_keys);
2519 }
2520
2521 /* if skb is set it will be used and fl6 can be NULL */
2522 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2523 const struct sk_buff *skb, struct flow_keys *flkeys)
2524 {
2525 struct flow_keys hash_keys;
2526 u32 mhash = 0;
2527
2528 switch (ip6_multipath_hash_policy(net)) {
2529 case 0:
2530 memset(&hash_keys, 0, sizeof(hash_keys));
2531 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2532 if (skb) {
2533 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2534 } else {
2535 hash_keys.addrs.v6addrs.src = fl6->saddr;
2536 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2537 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2538 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2539 }
2540 mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2541 break;
2542 case 1:
2543 if (skb) {
2544 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2545 struct flow_keys keys;
2546
2547 /* short-circuit if we already have L4 hash present */
2548 if (skb->l4_hash)
2549 return skb_get_hash_raw(skb) >> 1;
2550
2551 memset(&hash_keys, 0, sizeof(hash_keys));
2552
2553 if (!flkeys) {
2554 skb_flow_dissect_flow_keys(skb, &keys, flag);
2555 flkeys = &keys;
2556 }
2557 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2558 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2559 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2560 hash_keys.ports.src = flkeys->ports.src;
2561 hash_keys.ports.dst = flkeys->ports.dst;
2562 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2563 } else {
2564 memset(&hash_keys, 0, sizeof(hash_keys));
2565 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2566 hash_keys.addrs.v6addrs.src = fl6->saddr;
2567 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2568 if (fl6->flowi6_flags & FLOWI_FLAG_ANY_SPORT)
2569 hash_keys.ports.src = (__force __be16)get_random_u16();
2570 else
2571 hash_keys.ports.src = fl6->fl6_sport;
2572 hash_keys.ports.dst = fl6->fl6_dport;
2573 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2574 }
2575 mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2576 break;
2577 case 2:
2578 memset(&hash_keys, 0, sizeof(hash_keys));
2579 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2580 if (skb) {
2581 struct flow_keys keys;
2582
2583 if (!flkeys) {
2584 skb_flow_dissect_flow_keys(skb, &keys, 0);
2585 flkeys = &keys;
2586 }
2587
2588 /* Inner can be v4 or v6 */
2589 if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2590 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2591 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2592 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2593 } else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2594 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2595 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2596 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2597 hash_keys.tags.flow_label = flkeys->tags.flow_label;
2598 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2599 } else {
2600 /* Same as case 0 */
2601 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2602 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2603 }
2604 } else {
2605 /* Same as case 0 */
2606 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2607 hash_keys.addrs.v6addrs.src = fl6->saddr;
2608 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2609 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2610 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2611 }
2612 mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2613 break;
2614 case 3:
2615 if (skb)
2616 mhash = rt6_multipath_custom_hash_skb(net, skb);
2617 else
2618 mhash = rt6_multipath_custom_hash_fl6(net, fl6);
2619 break;
2620 }
2621
2622 return mhash >> 1;
2623 }
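
/* The switch above implements the net.ipv6.fib_multipath_hash_policy
 * sysctl:
 *   0 - L3: addresses, flow label and next header
 *   1 - L4: five-tuple (short-circuited by a precomputed skb L4 hash)
 *   2 - L3 of the inner packet when encapsulated, outer L3 otherwise
 *   3 - custom field set taken from net.ipv6.fib_multipath_hash_fields
 * e.g. "sysctl -w net.ipv6.fib_multipath_hash_policy=1" selects L4
 * hashing. The final ">> 1" keeps the hash within 31 bits so it
 * compares sanely against the signed per-nexthop upper bounds used
 * when selecting a path.
 */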
2624
2625 /* Called with rcu held */
2626 void ip6_route_input(struct sk_buff *skb)
2627 {
2628 const struct ipv6hdr *iph = ipv6_hdr(skb);
2629 struct net *net = dev_net(skb->dev);
2630 int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2631 struct ip_tunnel_info *tun_info;
2632 struct flowi6 fl6 = {
2633 .flowi6_iif = skb->dev->ifindex,
2634 .daddr = iph->daddr,
2635 .saddr = iph->saddr,
2636 .flowlabel = ip6_flowinfo(iph),
2637 .flowi6_mark = skb->mark,
2638 .flowi6_proto = iph->nexthdr,
2639 };
2640 struct flow_keys *flkeys = NULL, _flkeys;
2641
2642 tun_info = skb_tunnel_info(skb);
2643 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2644 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2645
2646 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2647 flkeys = &_flkeys;
2648
2649 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2650 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2651 skb_dst_drop(skb);
2652 skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2653 &fl6, skb, flags));
2654 }
2655
2656 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
2657 struct fib6_table *table,
2658 struct flowi6 *fl6,
2659 const struct sk_buff *skb,
2660 int flags)
2661 {
2662 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2663 }
2664
2665 static struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2666 const struct sock *sk,
2667 struct flowi6 *fl6,
2668 int flags)
2669 {
2670 bool any_src;
2671
2672 if (ipv6_addr_type(&fl6->daddr) &
2673 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2674 struct dst_entry *dst;
2675
2676 /* This function does not take refcnt on the dst */
2677 dst = l3mdev_link_scope_lookup(net, fl6);
2678 if (dst)
2679 return dst;
2680 }
2681
2682 fl6->flowi6_iif = LOOPBACK_IFINDEX;
2683
2684 flags |= RT6_LOOKUP_F_DST_NOREF;
2685 any_src = ipv6_addr_any(&fl6->saddr);
2686 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2687 (fl6->flowi6_oif && any_src))
2688 flags |= RT6_LOOKUP_F_IFACE;
2689
2690 if (!any_src)
2691 flags |= RT6_LOOKUP_F_HAS_SADDR;
2692 else if (sk)
2693 flags |= rt6_srcprefs2flags(READ_ONCE(inet6_sk(sk)->srcprefs));
2694
2695 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2696 }
2697
2698 struct dst_entry *ip6_route_output_flags(struct net *net,
2699 const struct sock *sk,
2700 struct flowi6 *fl6,
2701 int flags)
2702 {
2703 struct dst_entry *dst;
2704 struct rt6_info *rt6;
2705
2706 rcu_read_lock();
2707 dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2708 rt6 = dst_rt6_info(dst);
2709 /* For dst cached in uncached_list, refcnt is already taken. */
2710 if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) {
2711 dst = &net->ipv6.ip6_null_entry->dst;
2712 dst_hold(dst);
2713 }
2714 rcu_read_unlock();
2715
2716 return dst;
2717 }
2718 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2719
2720 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2721 {
2722 struct rt6_info *rt, *ort = dst_rt6_info(dst_orig);
2723 struct net_device *loopback_dev = net->loopback_dev;
2724 struct dst_entry *new = NULL;
2725
2726 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev,
2727 DST_OBSOLETE_DEAD, 0);
2728 if (rt) {
2729 rt6_info_init(rt);
2730 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2731
2732 new = &rt->dst;
2733 new->__use = 1;
2734 new->input = dst_discard;
2735 new->output = dst_discard_out;
2736
2737 dst_copy_metrics(new, &ort->dst);
2738
2739 rt->rt6i_idev = in6_dev_get(loopback_dev);
2740 rt->rt6i_gateway = ort->rt6i_gateway;
2741 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2742
2743 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2744 #ifdef CONFIG_IPV6_SUBTREES
2745 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2746 #endif
2747 }
2748
2749 dst_release(dst_orig);
2750 return new ? new : ERR_PTR(-ENOMEM);
2751 }
2752
2753 /*
2754 * Destination cache support functions
2755 */
2756
2757 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2758 {
2759 u32 rt_cookie = 0;
2760
2761 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2762 return false;
2763
2764 if (fib6_check_expired(f6i))
2765 return false;
2766
2767 return true;
2768 }
2769
2770 static struct dst_entry *rt6_check(struct rt6_info *rt,
2771 struct fib6_info *from,
2772 u32 cookie)
2773 {
2774 u32 rt_cookie = 0;
2775
2776 if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2777 rt_cookie != cookie)
2778 return NULL;
2779
2780 if (rt6_check_expired(rt))
2781 return NULL;
2782
2783 return &rt->dst;
2784 }
2785
2786 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2787 struct fib6_info *from,
2788 u32 cookie)
2789 {
2790 if (!__rt6_check_expired(rt) &&
2791 READ_ONCE(rt->dst.obsolete) == DST_OBSOLETE_FORCE_CHK &&
2792 fib6_check(from, cookie))
2793 return &rt->dst;
2794 return NULL;
2795 }
2796
2797 INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
2798 u32 cookie)
2799 {
2800 struct dst_entry *dst_ret;
2801 struct fib6_info *from;
2802 struct rt6_info *rt;
2803
2804 rt = dst_rt6_info(dst);
2805
2806 if (rt->sernum)
2807 return rt6_is_valid(rt) ? dst : NULL;
2808
2809 rcu_read_lock();
2810
2811 /* All IPv6 dsts are created with ->obsolete set to the value
2812 * DST_OBSOLETE_FORCE_CHK, which always forces validation calls
2813 * down into this function.
2814 */
2815
2816 from = rcu_dereference(rt->from);
2817
2818 if (from && (rt->rt6i_flags & RTF_PCPU ||
2819 unlikely(!list_empty(&rt->dst.rt_uncached))))
2820 dst_ret = rt6_dst_from_check(rt, from, cookie);
2821 else
2822 dst_ret = rt6_check(rt, from, cookie);
2823
2824 rcu_read_unlock();
2825
2826 return dst_ret;
2827 }
2828 EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
2829
2830 static void ip6_negative_advice(struct sock *sk,
2831 struct dst_entry *dst)
2832 {
2833 struct rt6_info *rt = dst_rt6_info(dst);
2834
2835 if (rt->rt6i_flags & RTF_CACHE) {
2836 rcu_read_lock();
2837 if (rt6_check_expired(rt)) {
2838 /* rt/dst cannot be destroyed yet,
2839 * because of rcu_read_lock()
2840 */
2841 sk_dst_reset(sk);
2842 rt6_remove_exception_rt(rt);
2843 }
2844 rcu_read_unlock();
2845 return;
2846 }
2847 sk_dst_reset(sk);
2848 }
2849
2850 static void ip6_link_failure(struct sk_buff *skb)
2851 {
2852 struct rt6_info *rt;
2853
2854 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2855
2856 rt = dst_rt6_info(skb_dst(skb));
2857 if (rt) {
2858 rcu_read_lock();
2859 if (rt->rt6i_flags & RTF_CACHE) {
2860 rt6_remove_exception_rt(rt);
2861 } else {
2862 struct fib6_info *from;
2863 struct fib6_node *fn;
2864
2865 from = rcu_dereference(rt->from);
2866 if (from) {
2867 fn = rcu_dereference(from->fib6_node);
2868 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2869 WRITE_ONCE(fn->fn_sernum, -1);
2870 }
2871 }
2872 rcu_read_unlock();
2873 }
2874 }
2875
2876 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2877 {
2878 if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2879 struct fib6_info *from;
2880
2881 rcu_read_lock();
2882 from = rcu_dereference(rt0->from);
2883 if (from)
2884 WRITE_ONCE(rt0->dst.expires, from->expires);
2885 rcu_read_unlock();
2886 }
2887
2888 dst_set_expires(&rt0->dst, timeout);
2889 rt0->rt6i_flags |= RTF_EXPIRES;
2890 }
2891
2892 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2893 {
2894 struct net *net = dev_net(rt->dst.dev);
2895
2896 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2897 rt->rt6i_flags |= RTF_MODIFIED;
2898 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2899 }
2900
2901 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2902 {
2903 return !(rt->rt6i_flags & RTF_CACHE) &&
2904 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2905 }
2906
2907 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2908 const struct ipv6hdr *iph, u32 mtu,
2909 bool confirm_neigh)
2910 {
2911 const struct in6_addr *daddr, *saddr;
2912 struct rt6_info *rt6 = dst_rt6_info(dst);
2913
2914 /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
2915 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
2916 * [see also comment in rt6_mtu_change_route()]
2917 */
2918
2919 if (iph) {
2920 daddr = &iph->daddr;
2921 saddr = &iph->saddr;
2922 } else if (sk) {
2923 daddr = &sk->sk_v6_daddr;
2924 saddr = &inet6_sk(sk)->saddr;
2925 } else {
2926 daddr = NULL;
2927 saddr = NULL;
2928 }
2929
2930 if (confirm_neigh)
2931 dst_confirm_neigh(dst, daddr);
2932
2933 if (mtu < IPV6_MIN_MTU)
2934 return;
2935 if (mtu >= dst_mtu(dst))
2936 return;
2937
2938 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2939 rt6_do_update_pmtu(rt6, mtu);
2940 /* update rt6_ex->stamp for cache */
2941 if (rt6->rt6i_flags & RTF_CACHE)
2942 rt6_update_exception_stamp_rt(rt6);
2943 } else if (daddr) {
2944 struct fib6_result res = {};
2945 struct rt6_info *nrt6;
2946
2947 rcu_read_lock();
2948 res.f6i = rcu_dereference(rt6->from);
2949 if (!res.f6i)
2950 goto out_unlock;
2951
2952 res.fib6_flags = res.f6i->fib6_flags;
2953 res.fib6_type = res.f6i->fib6_type;
2954
2955 if (res.f6i->nh) {
2956 struct fib6_nh_match_arg arg = {
2957 .dev = dst_dev_rcu(dst),
2958 .gw = &rt6->rt6i_gateway,
2959 };
2960
2961 nexthop_for_each_fib6_nh(res.f6i->nh,
2962 fib6_nh_find_match, &arg);
2963
2964 /* fib6_info uses a nexthop that has no fib6_nh matching
2965 * the dst->dev + gw pair. Should be impossible.
2966 */
2967 if (!arg.match)
2968 goto out_unlock;
2969
2970 res.nh = arg.match;
2971 } else {
2972 res.nh = res.f6i->fib6_nh;
2973 }
2974
2975 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2976 if (nrt6) {
2977 rt6_do_update_pmtu(nrt6, mtu);
2978 if (rt6_insert_exception(nrt6, &res))
2979 dst_release_immediate(&nrt6->dst);
2980 }
2981 out_unlock:
2982 rcu_read_unlock();
2983 }
2984 }
2985
2986 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2987 struct sk_buff *skb, u32 mtu,
2988 bool confirm_neigh)
2989 {
2990 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
2991 confirm_neigh);
2992 }
2993
2994 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2995 int oif, u32 mark, kuid_t uid)
2996 {
2997 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2998 struct dst_entry *dst;
2999 struct flowi6 fl6 = {
3000 .flowi6_oif = oif,
3001 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
3002 .daddr = iph->daddr,
3003 .saddr = iph->saddr,
3004 .flowlabel = ip6_flowinfo(iph),
3005 .flowi6_uid = uid,
3006 };
3007
3008 dst = ip6_route_output(net, NULL, &fl6);
3009 if (!dst->error)
3010 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
3011 dst_release(dst);
3012 }
3013 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
3014
3015 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
3016 {
3017 int oif = sk->sk_bound_dev_if;
3018 struct dst_entry *dst;
3019
3020 if (!oif && skb->dev)
3021 oif = l3mdev_master_ifindex(skb->dev);
3022
3023 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
3024 sk_uid(sk));
3025
3026 dst = __sk_dst_get(sk);
3027 if (!dst || !READ_ONCE(dst->obsolete) ||
3028 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
3029 return;
3030
3031 bh_lock_sock(sk);
3032 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
3033 ip6_datagram_dst_update(sk, false);
3034 bh_unlock_sock(sk);
3035 }
3036 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
3037
3038 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
3039 const struct flowi6 *fl6)
3040 {
3041 #ifdef CONFIG_IPV6_SUBTREES
3042 struct ipv6_pinfo *np = inet6_sk(sk);
3043 #endif
3044
3045 ip6_dst_store(sk, dst,
3046 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr),
3047 #ifdef CONFIG_IPV6_SUBTREES
3048 ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
3049 true :
3050 #endif
3051 false);
3052 }
3053
3054 static bool ip6_redirect_nh_match(const struct fib6_result *res,
3055 struct flowi6 *fl6,
3056 const struct in6_addr *gw,
3057 struct rt6_info **ret)
3058 {
3059 const struct fib6_nh *nh = res->nh;
3060
3061 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
3062 fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
3063 return false;
3064
3065 /* rt_cache's gateway might be different from its 'parent'
3066 * in the case of an ip redirect.
3067 * So we keep searching in the exception table if the gateway
3068 * is different.
3069 */
3070 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
3071 struct rt6_info *rt_cache;
3072
3073 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
3074 if (rt_cache &&
3075 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
3076 *ret = rt_cache;
3077 return true;
3078 }
3079 return false;
3080 }
3081 return true;
3082 }
3083
3084 struct fib6_nh_rd_arg {
3085 struct fib6_result *res;
3086 struct flowi6 *fl6;
3087 const struct in6_addr *gw;
3088 struct rt6_info **ret;
3089 };
3090
3091 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
3092 {
3093 struct fib6_nh_rd_arg *arg = _arg;
3094
3095 arg->res->nh = nh;
3096 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
3097 }
3098
3099 /* Handle redirects */
3100 struct ip6rd_flowi {
3101 struct flowi6 fl6;
3102 struct in6_addr gateway;
3103 };
3104
3105 INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
3106 struct fib6_table *table,
3107 struct flowi6 *fl6,
3108 const struct sk_buff *skb,
3109 int flags)
3110 {
3111 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
3112 struct rt6_info *ret = NULL;
3113 struct fib6_result res = {};
3114 struct fib6_nh_rd_arg arg = {
3115 .res = &res,
3116 .fl6 = fl6,
3117 .gw = &rdfl->gateway,
3118 .ret = &ret
3119 };
3120 struct fib6_info *rt;
3121 struct fib6_node *fn;
3122
3123 /* Get the "current" route for this destination and
3124 * check if the redirect has come from the appropriate router.
3125 *
3126 * RFC 4861 specifies that redirects should only be
3127 * accepted if they come from the nexthop to the target.
3128 * Due to the way the routes are chosen, this notion
3129 * is a bit fuzzy and one might need to check all possible
3130 * routes.
3131 */
3132
3133 rcu_read_lock();
3134 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
3135 restart:
3136 for_each_fib6_node_rt_rcu(fn) {
3137 res.f6i = rt;
3138 if (fib6_check_expired(rt))
3139 continue;
3140 if (rt->fib6_flags & RTF_REJECT)
3141 break;
3142 if (unlikely(rt->nh)) {
3143 if (nexthop_is_blackhole(rt->nh))
3144 continue;
3145 /* on a match, res->nh is filled in, and potentially ret as well */
3146 if (nexthop_for_each_fib6_nh(rt->nh,
3147 fib6_nh_redirect_match,
3148 &arg))
3149 goto out;
3150 } else {
3151 res.nh = rt->fib6_nh;
3152 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
3153 &ret))
3154 goto out;
3155 }
3156 }
3157
3158 if (!rt)
3159 rt = net->ipv6.fib6_null_entry;
3160 else if (rt->fib6_flags & RTF_REJECT) {
3161 ret = net->ipv6.ip6_null_entry;
3162 goto out;
3163 }
3164
3165 if (rt == net->ipv6.fib6_null_entry) {
3166 fn = fib6_backtrack(fn, &fl6->saddr);
3167 if (fn)
3168 goto restart;
3169 }
3170
3171 res.f6i = rt;
3172 res.nh = rt->fib6_nh;
3173 out:
3174 if (ret) {
3175 ip6_hold_safe(net, &ret);
3176 } else {
3177 res.fib6_flags = res.f6i->fib6_flags;
3178 res.fib6_type = res.f6i->fib6_type;
3179 ret = ip6_create_rt_rcu(&res);
3180 }
3181
3182 rcu_read_unlock();
3183
3184 trace_fib6_table_lookup(net, &res, table, fl6);
3185 return ret;
3186 }
3187
3188 static struct dst_entry *ip6_route_redirect(struct net *net,
3189 const struct flowi6 *fl6,
3190 const struct sk_buff *skb,
3191 const struct in6_addr *gateway)
3192 {
3193 int flags = RT6_LOOKUP_F_HAS_SADDR;
3194 struct ip6rd_flowi rdfl;
3195
3196 rdfl.fl6 = *fl6;
3197 rdfl.gateway = *gateway;
3198
3199 return fib6_rule_lookup(net, &rdfl.fl6, skb,
3200 flags, __ip6_route_redirect);
3201 }
3202
3203 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
3204 kuid_t uid)
3205 {
3206 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3207 struct dst_entry *dst;
3208 struct flowi6 fl6 = {
3209 .flowi6_iif = LOOPBACK_IFINDEX,
3210 .flowi6_oif = oif,
3211 .flowi6_mark = mark,
3212 .daddr = iph->daddr,
3213 .saddr = iph->saddr,
3214 .flowlabel = ip6_flowinfo(iph),
3215 .flowi6_uid = uid,
3216 };
3217
3218 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
3219 rt6_do_redirect(dst, NULL, skb);
3220 dst_release(dst);
3221 }
3222 EXPORT_SYMBOL_GPL(ip6_redirect);
3223
3224 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3225 {
3226 const struct ipv6hdr *iph = ipv6_hdr(skb);
3227 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3228 struct dst_entry *dst;
3229 struct flowi6 fl6 = {
3230 .flowi6_iif = LOOPBACK_IFINDEX,
3231 .flowi6_oif = oif,
3232 .daddr = msg->dest,
3233 .saddr = iph->daddr,
3234 .flowi6_uid = sock_net_uid(net, NULL),
3235 };
3236
3237 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3238 rt6_do_redirect(dst, NULL, skb);
3239 dst_release(dst);
3240 }
3241
3242 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3243 {
3244 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
3245 READ_ONCE(sk->sk_mark), sk_uid(sk));
3246 }
3247 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
3248
3249 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3250 {
3251 unsigned int mtu = dst_mtu(dst);
3252 struct net *net;
3253
3254 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3255
3256 rcu_read_lock();
3257
3258 net = dst_dev_net_rcu(dst);
3259 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
3260 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
3261
3262 rcu_read_unlock();
3263
3264 /*
3265 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
3266 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3267 * IPV6_MAXPLEN is also valid and means: "any MSS,
3268 * rely only on pmtu discovery"
3269 */
3270 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
3271 mtu = IPV6_MAXPLEN;
3272 return mtu;
3273 }
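
/* Worked example: on a standard 1500-byte Ethernet MTU the advertised
 * MSS comes out to 1500 - 40 (struct ipv6hdr) - 20 (struct tcphdr) =
 * 1440 bytes, clamped from below by the ip6_rt_min_advmss sysctl and
 * from above by the IPV6_MAXPLEN special value described above.
 */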
3274
3275 INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst)
3276 {
3277 return ip6_dst_mtu_maybe_forward(dst, false);
3278 }
3279 EXPORT_INDIRECT_CALLABLE(ip6_mtu);
3280
3281 /* MTU selection:
3282 * 1. mtu on route is locked - use it
3283 * 2. mtu from nexthop exception
3284 * 3. mtu from egress device
3285 *
3286 * based on ip6_dst_mtu_forward and exception logic of
3287 * rt6_find_cached_rt; called with rcu_read_lock
3288 */
3289 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3290 const struct in6_addr *daddr,
3291 const struct in6_addr *saddr)
3292 {
3293 const struct fib6_nh *nh = res->nh;
3294 struct fib6_info *f6i = res->f6i;
3295 struct inet6_dev *idev;
3296 struct rt6_info *rt;
3297 u32 mtu = 0;
3298
3299 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3300 mtu = f6i->fib6_pmtu;
3301 if (mtu)
3302 goto out;
3303 }
3304
3305 rt = rt6_find_cached_rt(res, daddr, saddr);
3306 if (unlikely(rt)) {
3307 mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3308 } else {
3309 struct net_device *dev = nh->fib_nh_dev;
3310
3311 mtu = IPV6_MIN_MTU;
3312 idev = __in6_dev_get(dev);
3313 if (idev)
3314 mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6));
3315 }
3316
3317 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3318 out:
3319 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3320 }
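
/* Example of the precedence above (hypothetical values): a route with
 * a locked MTU metric of 1400 always reports 1400; otherwise a cached
 * exception created by a "packet too big" message (say 1280) wins;
 * failing both, the egress device's mtu6 is used, clamped to
 * IP6_MAX_MTU. Any lwtunnel encap headroom is subtracted at the end in
 * every case.
 */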
3321
3322 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3323 struct flowi6 *fl6)
3324 {
3325 struct dst_entry *dst;
3326 struct rt6_info *rt;
3327 struct inet6_dev *idev = in6_dev_get(dev);
3328 struct net *net = dev_net(dev);
3329
3330 if (unlikely(!idev))
3331 return ERR_PTR(-ENODEV);
3332
3333 rt = ip6_dst_alloc(net, dev, 0);
3334 if (unlikely(!rt)) {
3335 in6_dev_put(idev);
3336 dst = ERR_PTR(-ENOMEM);
3337 goto out;
3338 }
3339
3340 rt->dst.input = ip6_input;
3341 rt->dst.output = ip6_output;
3342 rt->rt6i_gateway = fl6->daddr;
3343 rt->rt6i_dst.addr = fl6->daddr;
3344 rt->rt6i_dst.plen = 128;
3345 rt->rt6i_idev = idev;
3346 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3347
3348 /* Add this dst into uncached_list so that rt6_disable_ip() can
3349 * properly release the net_device
3350 */
3351 rt6_uncached_list_add(rt);
3352
3353 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
3354
3355 out:
3356 return dst;
3357 }
3358
3359 static void ip6_dst_gc(struct dst_ops *ops)
3360 {
3361 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3362 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
3363 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
3364 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
3365 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
3366 unsigned int val;
3367 int entries;
3368
3369 if (time_after(rt_last_gc + rt_min_interval, jiffies))
3370 goto out;
3371
3372 fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
3373 entries = dst_entries_get_slow(ops);
3374 if (entries < ops->gc_thresh)
3375 atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
3376 out:
3377 val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
3378 atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
3379 }
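
/* The gc_expire value decays geometrically: each call above does
 * expire -= expire >> elasticity. With the default elasticity of 9
 * that is roughly a 0.2% reduction per invocation, while a run that
 * brings the entry count below gc_thresh resets expire to half of
 * ip6_rt_gc_timeout.
 */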
3380
3381 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3382 const struct in6_addr *gw_addr, u32 tbid,
3383 int flags, struct fib6_result *res)
3384 {
3385 struct flowi6 fl6 = {
3386 .flowi6_oif = cfg->fc_ifindex,
3387 .daddr = *gw_addr,
3388 .saddr = cfg->fc_prefsrc,
3389 };
3390 struct fib6_table *table;
3391 int err;
3392
3393 table = fib6_get_table(net, tbid);
3394 if (!table)
3395 return -EINVAL;
3396
3397 if (!ipv6_addr_any(&cfg->fc_prefsrc))
3398 flags |= RT6_LOOKUP_F_HAS_SADDR;
3399
3400 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3401
3402 err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3403 if (!err && res->f6i != net->ipv6.fib6_null_entry)
3404 fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3405 cfg->fc_ifindex != 0, NULL, flags);
3406
3407 return err;
3408 }
3409
3410 static int ip6_route_check_nh_onlink(struct net *net,
3411 struct fib6_config *cfg,
3412 const struct net_device *dev,
3413 struct netlink_ext_ack *extack)
3414 {
3415 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3416 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3417 struct fib6_result res = {};
3418 int err;
3419
3420 err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3421 if (!err && !(res.fib6_flags & RTF_REJECT) &&
3422 /* ignore match if it is the default route */
3423 !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
3424 (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
3425 NL_SET_ERR_MSG(extack,
3426 "Nexthop has invalid gateway or device mismatch");
3427 err = -EINVAL;
3428 }
3429
3430 return err;
3431 }
3432
3433 static int ip6_route_check_nh(struct net *net,
3434 struct fib6_config *cfg,
3435 struct net_device **_dev,
3436 netdevice_tracker *dev_tracker,
3437 struct inet6_dev **idev)
3438 {
3439 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3440 struct net_device *dev = _dev ? *_dev : NULL;
3441 int flags = RT6_LOOKUP_F_IFACE;
3442 struct fib6_result res = {};
3443 int err = -EHOSTUNREACH;
3444
3445 if (cfg->fc_table) {
3446 err = ip6_nh_lookup_table(net, cfg, gw_addr,
3447 cfg->fc_table, flags, &res);
3448 /* gw_addr can not require a gateway or resolve to a reject
3449 * route. If a device is given, it must match the result.
3450 */
3451 if (err || res.fib6_flags & RTF_REJECT ||
3452 res.nh->fib_nh_gw_family ||
3453 (dev && dev != res.nh->fib_nh_dev))
3454 err = -EHOSTUNREACH;
3455 }
3456
3457 if (err < 0) {
3458 struct flowi6 fl6 = {
3459 .flowi6_oif = cfg->fc_ifindex,
3460 .daddr = *gw_addr,
3461 };
3462
3463 err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3464 if (err || res.fib6_flags & RTF_REJECT ||
3465 res.nh->fib_nh_gw_family)
3466 err = -EHOSTUNREACH;
3467
3468 if (err)
3469 return err;
3470
3471 fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3472 cfg->fc_ifindex != 0, NULL, flags);
3473 }
3474
3475 err = 0;
3476 if (dev) {
3477 if (dev != res.nh->fib_nh_dev)
3478 err = -EHOSTUNREACH;
3479 } else {
3480 *_dev = dev = res.nh->fib_nh_dev;
3481 netdev_hold(dev, dev_tracker, GFP_ATOMIC);
3482 *idev = in6_dev_get(dev);
3483 }
3484
3485 return err;
3486 }
3487
3488 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3489 struct net_device **_dev,
3490 netdevice_tracker *dev_tracker,
3491 struct inet6_dev **idev,
3492 struct netlink_ext_ack *extack)
3493 {
3494 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3495 int gwa_type = ipv6_addr_type(gw_addr);
3496 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3497 const struct net_device *dev = *_dev;
3498 bool need_addr_check = !dev;
3499 int err = -EINVAL;
3500
3501 /* if gw_addr is local we will fail to detect this in case the
3502 * address is still TENTATIVE (DAD in progress). rt6_lookup() will
3503 * return the already-added prefix route via the interface that the
3504 * prefix route was assigned to, which might be non-loopback.
3505 */
3506 if (dev &&
3507 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3508 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3509 goto out;
3510 }
3511
3512 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3513 /* IPv6 strictly forbids using non-link-local
3514 * addresses as nexthop addresses.
3515 * Otherwise, the router will not be able to send redirects.
3516 * That is a good thing, but in some (rare!) circumstances
3517 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3518 * some exceptions. --ANK
3519 * We allow IPv4-mapped nexthops to support RFC4798-type
3520 * addressing.
3521 */
3522 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3523 NL_SET_ERR_MSG(extack, "Invalid gateway address");
3524 goto out;
3525 }
3526
3527 rcu_read_lock();
3528
3529 if (cfg->fc_flags & RTNH_F_ONLINK)
3530 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3531 else
3532 err = ip6_route_check_nh(net, cfg, _dev, dev_tracker,
3533 idev);
3534
3535 rcu_read_unlock();
3536
3537 if (err)
3538 goto out;
3539 }
3540
3541 /* reload in case device was changed */
3542 dev = *_dev;
3543
3544 err = -EINVAL;
3545 if (!dev) {
3546 NL_SET_ERR_MSG(extack, "Egress device not specified");
3547 goto out;
3548 } else if (dev->flags & IFF_LOOPBACK) {
3549 NL_SET_ERR_MSG(extack,
3550 "Egress device can not be loopback device for this route");
3551 goto out;
3552 }
3553
3554 /* if we did not check gw_addr above, do so now that the
3555 * egress device has been resolved.
3556 */
3557 if (need_addr_check &&
3558 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3559 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3560 goto out;
3561 }
3562
3563 err = 0;
3564 out:
3565 return err;
3566 }
3567
3568 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3569 {
3570 if ((flags & RTF_REJECT) ||
3571 (dev && (dev->flags & IFF_LOOPBACK) &&
3572 !(addr_type & IPV6_ADDR_LOOPBACK) &&
3573 !(flags & (RTF_ANYCAST | RTF_LOCAL))))
3574 return true;
3575
3576 return false;
3577 }
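
/* Example (hypothetical command): "ip -6 route add 2001:db8::/64 dev lo"
 * names the loopback device for a non-loopback, non-anycast prefix, so
 * the route is treated as a reject route instead of creating a
 * forwarding loop; explicit RTF_REJECT routes (unreachable/prohibit)
 * match trivially.
 */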
3578
3579 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3580 struct fib6_config *cfg, gfp_t gfp_flags,
3581 struct netlink_ext_ack *extack)
3582 {
3583 netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker;
3584 struct net_device *dev = NULL;
3585 struct inet6_dev *idev = NULL;
3586 int addr_type;
3587 int err;
3588
3589 fib6_nh->fib_nh_family = AF_INET6;
3590 #ifdef CONFIG_IPV6_ROUTER_PREF
3591 fib6_nh->last_probe = jiffies;
3592 #endif
3593 if (cfg->fc_is_fdb) {
3594 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3595 fib6_nh->fib_nh_gw_family = AF_INET6;
3596 return 0;
3597 }
3598
3599 err = -ENODEV;
3600 if (cfg->fc_ifindex) {
3601 dev = netdev_get_by_index(net, cfg->fc_ifindex,
3602 dev_tracker, gfp_flags);
3603 if (!dev)
3604 goto out;
3605 idev = in6_dev_get(dev);
3606 if (!idev)
3607 goto out;
3608 }
3609
3610 if (cfg->fc_flags & RTNH_F_ONLINK) {
3611 if (!dev) {
3612 NL_SET_ERR_MSG(extack,
3613 "Nexthop device required for onlink");
3614 goto out;
3615 }
3616
3617 if (!(dev->flags & IFF_UP)) {
3618 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3619 err = -ENETDOWN;
3620 goto out;
3621 }
3622
3623 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3624 }
3625
3626 fib6_nh->fib_nh_weight = 1;
3627
3628 /* We cannot add true routes via loopback here;
3629 * they would result in kernel looping. Promote them to reject routes.
3630 */
3631 addr_type = ipv6_addr_type(&cfg->fc_dst);
3632 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3633 /* hold loopback dev/idev if we haven't done so. */
3634 if (dev != net->loopback_dev) {
3635 if (dev) {
3636 netdev_put(dev, dev_tracker);
3637 in6_dev_put(idev);
3638 }
3639 dev = net->loopback_dev;
3640 netdev_hold(dev, dev_tracker, gfp_flags);
3641 idev = in6_dev_get(dev);
3642 if (!idev) {
3643 err = -ENODEV;
3644 goto out;
3645 }
3646 }
3647 goto pcpu_alloc;
3648 }
3649
3650 if (cfg->fc_flags & RTF_GATEWAY) {
3651 err = ip6_validate_gw(net, cfg, &dev, dev_tracker,
3652 &idev, extack);
3653 if (err)
3654 goto out;
3655
3656 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3657 fib6_nh->fib_nh_gw_family = AF_INET6;
3658 }
3659
3660 err = -ENODEV;
3661 if (!dev)
3662 goto out;
3663
3664 if (!idev || idev->cnf.disable_ipv6) {
3665 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3666 err = -EACCES;
3667 goto out;
3668 }
3669
3670 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3671 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3672 err = -ENETDOWN;
3673 goto out;
3674 }
3675
3676 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3677 !netif_carrier_ok(dev))
3678 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3679
3680 err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
3681 cfg->fc_encap_type, cfg, gfp_flags, extack);
3682 if (err)
3683 goto out;
3684
3685 pcpu_alloc:
3686 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3687 if (!fib6_nh->rt6i_pcpu) {
3688 err = -ENOMEM;
3689 goto out;
3690 }
3691
3692 fib6_nh->fib_nh_dev = dev;
3693 fib6_nh->fib_nh_oif = dev->ifindex;
3694 err = 0;
3695 out:
3696 if (idev)
3697 in6_dev_put(idev);
3698
3699 if (err) {
3700 fib_nh_common_release(&fib6_nh->nh_common);
3701 fib6_nh->nh_common.nhc_pcpu_rth_output = NULL;
3702 fib6_nh->fib_nh_lws = NULL;
3703 netdev_put(dev, dev_tracker);
3704 }
3705
3706 return err;
3707 }
3708
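/* Tear down a fib6_nh: flush its exception routes, free the exception
 * bucket, drop the per-cpu cached dsts and release the common state
 * (device reference, lwtunnel state) via fib_nh_common_release().
 */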
3709 void fib6_nh_release(struct fib6_nh *fib6_nh)
3710 {
3711 struct rt6_exception_bucket *bucket;
3712
3713 rcu_read_lock();
3714
3715 fib6_nh_flush_exceptions(fib6_nh, NULL);
3716 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3717 if (bucket) {
3718 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3719 kfree(bucket);
3720 }
3721
3722 rcu_read_unlock();
3723
3724 fib6_nh_release_dsts(fib6_nh);
3725 free_percpu(fib6_nh->rt6i_pcpu);
3726
3727 fib_nh_common_release(&fib6_nh->nh_common);
3728 }
3729
3730 void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
3731 {
3732 int cpu;
3733
3734 if (!fib6_nh->rt6i_pcpu)
3735 return;
3736
3737 for_each_possible_cpu(cpu) {
3738 struct rt6_info *pcpu_rt, **ppcpu_rt;
3739
3740 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3741 pcpu_rt = xchg(ppcpu_rt, NULL);
3742 if (pcpu_rt) {
3743 dst_dev_put(&pcpu_rt->dst);
3744 dst_release(&pcpu_rt->dst);
3745 }
3746 }
3747 }
3748
3749 static int fib6_config_validate(struct fib6_config *cfg,
3750 struct netlink_ext_ack *extack)
3751 {
3752 /* RTF_PCPU is an internal flag; can not be set by userspace */
3753 if (cfg->fc_flags & RTF_PCPU) {
3754 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3755 goto errout;
3756 }
3757
3758 /* RTF_CACHE is an internal flag; can not be set by userspace */
3759 if (cfg->fc_flags & RTF_CACHE) {
3760 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3761 goto errout;
3762 }
3763
3764 if (cfg->fc_type > RTN_MAX) {
3765 NL_SET_ERR_MSG(extack, "Invalid route type");
3766 goto errout;
3767 }
3768
3769 if (cfg->fc_dst_len > 128) {
3770 NL_SET_ERR_MSG(extack, "Invalid prefix length");
3771 goto errout;
3772 }
3773
3774 #ifdef CONFIG_IPV6_SUBTREES
3775 if (cfg->fc_src_len > 128) {
3776 NL_SET_ERR_MSG(extack, "Invalid source address length");
3777 goto errout;
3778 }
3779
3780 if (cfg->fc_nh_id && cfg->fc_src_len) {
3781 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3782 goto errout;
3783 }
3784 #else
3785 if (cfg->fc_src_len) {
3786 NL_SET_ERR_MSG(extack,
3787 "Specifying source address requires IPV6_SUBTREES to be enabled");
3788 goto errout;
3789 }
3790 #endif
3791 return 0;
3792 errout:
3793 return -EINVAL;
3794 }
3795
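/* Allocate a fib6_info and fill it in from the netlink/ioctl config.
 * Nexthop binding is done separately in ip6_route_info_create_nh(), so
 * callers must invoke that before inserting the route.
 */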
3796 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3797 gfp_t gfp_flags,
3798 struct netlink_ext_ack *extack)
3799 {
3800 struct net *net = cfg->fc_nlinfo.nl_net;
3801 struct fib6_table *table;
3802 struct fib6_info *rt;
3803 int err;
3804
3805 if (cfg->fc_nlinfo.nlh &&
3806 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3807 table = fib6_get_table(net, cfg->fc_table);
3808 if (!table) {
3809 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3810 table = fib6_new_table(net, cfg->fc_table);
3811 }
3812 } else {
3813 table = fib6_new_table(net, cfg->fc_table);
3814 }
3815 if (!table) {
3816 err = -ENOBUFS;
3817 goto err;
3818 }
3819
3820 rt = fib6_info_alloc(gfp_flags, !cfg->fc_nh_id);
3821 if (!rt) {
3822 err = -ENOMEM;
3823 goto err;
3824 }
3825
3826 rt->fib6_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len,
3827 extack);
3828 if (IS_ERR(rt->fib6_metrics)) {
3829 err = PTR_ERR(rt->fib6_metrics);
3830 goto free;
3831 }
3832
3833 if (cfg->fc_flags & RTF_ADDRCONF)
3834 rt->dst_nocount = true;
3835
3836 if (cfg->fc_flags & RTF_EXPIRES)
3837 fib6_set_expires(rt, jiffies +
3838 clock_t_to_jiffies(cfg->fc_expires));
3839
3840 if (cfg->fc_protocol == RTPROT_UNSPEC)
3841 cfg->fc_protocol = RTPROT_BOOT;
3842
3843 rt->fib6_protocol = cfg->fc_protocol;
3844 rt->fib6_table = table;
3845 rt->fib6_metric = cfg->fc_metric;
3846 rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3847 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3848
3849 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3850 rt->fib6_dst.plen = cfg->fc_dst_len;
3851
3852 #ifdef CONFIG_IPV6_SUBTREES
3853 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3854 rt->fib6_src.plen = cfg->fc_src_len;
3855 #endif
3856 return rt;
3857 free:
3858 kfree(rt);
3859 err:
3860 return ERR_PTR(err);
3861 }
3862
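/* Bind the route either to a shared nexthop object (cfg->fc_nh_id) or to
 * its embedded fib6_nh, and validate the preferred source address. On
 * error the fib6_info is freed here, so the caller must not release it
 * again.
 */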
3863 static int ip6_route_info_create_nh(struct fib6_info *rt,
3864 struct fib6_config *cfg,
3865 gfp_t gfp_flags,
3866 struct netlink_ext_ack *extack)
3867 {
3868 struct net *net = cfg->fc_nlinfo.nl_net;
3869 struct fib6_nh *fib6_nh;
3870 int err;
3871
3872 if (cfg->fc_nh_id) {
3873 struct nexthop *nh;
3874
3875 rcu_read_lock();
3876
3877 nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3878 if (!nh) {
3879 err = -EINVAL;
3880 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3881 goto out_free;
3882 }
3883
3884 err = fib6_check_nexthop(nh, cfg, extack);
3885 if (err)
3886 goto out_free;
3887
3888 if (!nexthop_get(nh)) {
3889 NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3890 err = -ENOENT;
3891 goto out_free;
3892 }
3893
3894 rt->nh = nh;
3895 fib6_nh = nexthop_fib6_nh(rt->nh);
3896
3897 rcu_read_unlock();
3898 } else {
3899 int addr_type;
3900
3901 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3902 if (err)
3903 goto out_release;
3904
3905 fib6_nh = rt->fib6_nh;
3906
3907 /* We cannot add true routes via loopback here, they would
3908 * result in kernel looping; promote them to reject routes
3909 */
3910 addr_type = ipv6_addr_type(&cfg->fc_dst);
3911 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3912 addr_type))
3913 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3914 }
3915
3916 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3917 struct net_device *dev = fib6_nh->fib_nh_dev;
3918
3919 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3920 NL_SET_ERR_MSG(extack, "Invalid source address");
3921 err = -EINVAL;
3922 goto out_release;
3923 }
3924 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3925 rt->fib6_prefsrc.plen = 128;
3926 }
3927
3928 return 0;
3929 out_release:
3930 fib6_info_release(rt);
3931 return err;
3932 out_free:
3933 rcu_read_unlock();
3934 ip_fib_metrics_put(rt->fib6_metrics);
3935 kfree(rt);
3936 return err;
3937 }
3938
3939 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3940 struct netlink_ext_ack *extack)
3941 {
3942 struct fib6_info *rt;
3943 int err;
3944
3945 err = fib6_config_validate(cfg, extack);
3946 if (err)
3947 return err;
3948
3949 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3950 if (IS_ERR(rt))
3951 return PTR_ERR(rt);
3952
3953 err = ip6_route_info_create_nh(rt, cfg, gfp_flags, extack);
3954 if (err)
3955 return err;
3956
3957 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3958 fib6_info_release(rt);
3959
3960 return err;
3961 }
3962
3963 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3964 {
3965 struct net *net = info->nl_net;
3966 struct fib6_table *table;
3967 int err;
3968
3969 if (rt == net->ipv6.fib6_null_entry) {
3970 err = -ENOENT;
3971 goto out;
3972 }
3973
3974 table = rt->fib6_table;
3975 spin_lock_bh(&table->tb6_lock);
3976 err = fib6_del(rt, info);
3977 spin_unlock_bh(&table->tb6_lock);
3978
3979 out:
3980 fib6_info_release(rt);
3981 return err;
3982 }
3983
3984 int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
3985 {
3986 struct nl_info info = {
3987 .nl_net = net,
3988 .skip_notify = skip_notify
3989 };
3990
3991 return __ip6_del_rt(rt, &info);
3992 }
3993
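/* Delete a route and, for a multipath route with fc_delete_all_nh set,
 * all of its siblings under a single table lock, preferring one netlink
 * notification that carries every hop.
 */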
3994 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3995 {
3996 struct nl_info *info = &cfg->fc_nlinfo;
3997 struct net *net = info->nl_net;
3998 struct sk_buff *skb = NULL;
3999 struct fib6_table *table;
4000 int err = -ENOENT;
4001
4002 if (rt == net->ipv6.fib6_null_entry)
4003 goto out_put;
4004 table = rt->fib6_table;
4005 spin_lock_bh(&table->tb6_lock);
4006
4007 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
4008 struct fib6_info *sibling, *next_sibling;
4009 struct fib6_node *fn;
4010
4011 /* prefer to send a single notification with all hops */
4012 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
4013 if (skb) {
4014 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
4015
4016 if (rt6_fill_node(net, skb, rt, NULL,
4017 NULL, NULL, 0, RTM_DELROUTE,
4018 info->portid, seq, 0) < 0) {
4019 kfree_skb(skb);
4020 skb = NULL;
4021 } else
4022 info->skip_notify = 1;
4023 }
4024
4025 /* 'rt' points to the first sibling route. If it is not the
4026 * leaf, then we do not need to send a notification. Otherwise,
4027 * we need to check if the last sibling has a next route or not
4028 * and emit a replace or delete notification, respectively.
4029 */
4030 info->skip_notify_kernel = 1;
4031 fn = rcu_dereference_protected(rt->fib6_node,
4032 lockdep_is_held(&table->tb6_lock));
4033 if (rcu_access_pointer(fn->leaf) == rt) {
4034 struct fib6_info *last_sibling, *replace_rt;
4035
4036 last_sibling = list_last_entry(&rt->fib6_siblings,
4037 struct fib6_info,
4038 fib6_siblings);
4039 replace_rt = rcu_dereference_protected(
4040 last_sibling->fib6_next,
4041 lockdep_is_held(&table->tb6_lock));
4042 if (replace_rt)
4043 call_fib6_entry_notifiers_replace(net,
4044 replace_rt);
4045 else
4046 call_fib6_multipath_entry_notifiers(net,
4047 FIB_EVENT_ENTRY_DEL,
4048 rt, rt->fib6_nsiblings,
4049 NULL);
4050 }
4051 list_for_each_entry_safe(sibling, next_sibling,
4052 &rt->fib6_siblings,
4053 fib6_siblings) {
4054 err = fib6_del(sibling, info);
4055 if (err)
4056 goto out_unlock;
4057 }
4058 }
4059
4060 err = fib6_del(rt, info);
4061 out_unlock:
4062 spin_unlock_bh(&table->tb6_lock);
4063 out_put:
4064 fib6_info_release(rt);
4065
4066 if (skb) {
4067 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
4068 info->nlh, gfp_any());
4069 }
4070 return err;
4071 }
4072
4073 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
4074 {
4075 int rc = -ESRCH;
4076
4077 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
4078 goto out;
4079
4080 if (cfg->fc_flags & RTF_GATEWAY &&
4081 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
4082 goto out;
4083
4084 rc = rt6_remove_exception_rt(rt);
4085 out:
4086 return rc;
4087 }
4088
4089 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
4090 struct fib6_nh *nh)
4091 {
4092 struct fib6_result res = {
4093 .f6i = rt,
4094 .nh = nh,
4095 };
4096 struct rt6_info *rt_cache;
4097
4098 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
4099 if (rt_cache)
4100 return __ip6_del_cached_rt(rt_cache, cfg);
4101
4102 return 0;
4103 }
4104
4105 struct fib6_nh_del_cached_rt_arg {
4106 struct fib6_config *cfg;
4107 struct fib6_info *f6i;
4108 };
4109
4110 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
4111 {
4112 struct fib6_nh_del_cached_rt_arg *arg = _arg;
4113 int rc;
4114
4115 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
4116 return rc != -ESRCH ? rc : 0;
4117 }
4118
4119 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
4120 {
4121 struct fib6_nh_del_cached_rt_arg arg = {
4122 .cfg = cfg,
4123 .f6i = f6i
4124 };
4125
4126 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
4127 }
4128
4129 static int ip6_route_del(struct fib6_config *cfg,
4130 struct netlink_ext_ack *extack)
4131 {
4132 struct fib6_table *table;
4133 struct fib6_info *rt;
4134 struct fib6_node *fn;
4135 int err = -ESRCH;
4136
4137 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
4138 if (!table) {
4139 NL_SET_ERR_MSG(extack, "FIB table does not exist");
4140 return err;
4141 }
4142
4143 rcu_read_lock();
4144
4145 fn = fib6_locate(&table->tb6_root,
4146 &cfg->fc_dst, cfg->fc_dst_len,
4147 &cfg->fc_src, cfg->fc_src_len,
4148 !(cfg->fc_flags & RTF_CACHE));
4149
4150 if (fn) {
4151 for_each_fib6_node_rt_rcu(fn) {
4152 struct fib6_nh *nh;
4153
4154 if (rt->nh && cfg->fc_nh_id &&
4155 rt->nh->id != cfg->fc_nh_id)
4156 continue;
4157
4158 if (cfg->fc_flags & RTF_CACHE) {
4159 int rc = 0;
4160
4161 if (rt->nh) {
4162 rc = ip6_del_cached_rt_nh(cfg, rt);
4163 } else if (cfg->fc_nh_id) {
4164 continue;
4165 } else {
4166 nh = rt->fib6_nh;
4167 rc = ip6_del_cached_rt(cfg, rt, nh);
4168 }
4169 if (rc != -ESRCH) {
4170 rcu_read_unlock();
4171 return rc;
4172 }
4173 continue;
4174 }
4175
4176 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
4177 continue;
4178 if (cfg->fc_protocol &&
4179 cfg->fc_protocol != rt->fib6_protocol)
4180 continue;
4181
4182 if (rt->nh) {
4183 if (!fib6_info_hold_safe(rt))
4184 continue;
4185
4186 err = __ip6_del_rt(rt, &cfg->fc_nlinfo);
4187 break;
4188 }
4189 if (cfg->fc_nh_id)
4190 continue;
4191
4192 nh = rt->fib6_nh;
4193 if (cfg->fc_ifindex &&
4194 (!nh->fib_nh_dev ||
4195 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
4196 continue;
4197 if (cfg->fc_flags & RTF_GATEWAY &&
4198 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
4199 continue;
4200 if (!fib6_info_hold_safe(rt))
4201 continue;
4202
4203 /* if gateway was specified only delete the one hop */
4204 if (cfg->fc_flags & RTF_GATEWAY)
4205 err = __ip6_del_rt(rt, &cfg->fc_nlinfo);
4206 else
4207 err = __ip6_del_rt_siblings(rt, cfg);
4208 break;
4209 }
4210 }
4211 rcu_read_unlock();
4212
4213 return err;
4214 }
4215
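/* Handle an ICMPv6 redirect: validate the message and its ND options,
 * update the neighbour cache entry for the target, and install the new
 * next hop as a cached exception route for the destination.
 */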
4216 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
4217 {
4218 struct netevent_redirect netevent;
4219 struct rt6_info *rt, *nrt = NULL;
4220 struct fib6_result res = {};
4221 struct ndisc_options ndopts;
4222 struct inet6_dev *in6_dev;
4223 struct neighbour *neigh;
4224 struct rd_msg *msg;
4225 int optlen, on_link;
4226 u8 *lladdr;
4227
4228 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4229 optlen -= sizeof(*msg);
4230
4231 if (optlen < 0) {
4232 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4233 return;
4234 }
4235
4236 msg = (struct rd_msg *)icmp6_hdr(skb);
4237
4238 if (ipv6_addr_is_multicast(&msg->dest)) {
4239 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4240 return;
4241 }
4242
4243 on_link = 0;
4244 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4245 on_link = 1;
4246 } else if (ipv6_addr_type(&msg->target) !=
4247 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4248 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4249 return;
4250 }
4251
4252 in6_dev = __in6_dev_get(skb->dev);
4253 if (!in6_dev)
4254 return;
4255 if (READ_ONCE(in6_dev->cnf.forwarding) ||
4256 !READ_ONCE(in6_dev->cnf.accept_redirects))
4257 return;
4258
4259 /* RFC2461 8.1:
4260 * The IP source address of the Redirect MUST be the same as the current
4261 * first-hop router for the specified ICMP Destination Address.
4262 */
4263
4264 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4265 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4266 return;
4267 }
4268
4269 lladdr = NULL;
4270 if (ndopts.nd_opts_tgt_lladdr) {
4271 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4272 skb->dev);
4273 if (!lladdr) {
4274 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4275 return;
4276 }
4277 }
4278
4279 rt = dst_rt6_info(dst);
4280 if (rt->rt6i_flags & RTF_REJECT) {
4281 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4282 return;
4283 }
4284
4285 /* Redirect received -> path was valid.
4286 * Look, redirects are sent only in response to data packets,
4287 * so that this nexthop apparently is reachable. --ANK
4288 */
4289 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4290
4291 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4292 if (!neigh)
4293 return;
4294
4295 /*
4296 * We have finally decided to accept it.
4297 */
4298
4299 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4300 NEIGH_UPDATE_F_WEAK_OVERRIDE|
4301 NEIGH_UPDATE_F_OVERRIDE|
4302 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4303 NEIGH_UPDATE_F_ISROUTER)),
4304 NDISC_REDIRECT, &ndopts);
4305
4306 rcu_read_lock();
4307 res.f6i = rcu_dereference(rt->from);
4308 if (!res.f6i)
4309 goto out;
4310
4311 if (res.f6i->nh) {
4312 struct fib6_nh_match_arg arg = {
4313 .dev = dst_dev_rcu(dst),
4314 .gw = &rt->rt6i_gateway,
4315 };
4316
4317 nexthop_for_each_fib6_nh(res.f6i->nh,
4318 fib6_nh_find_match, &arg);
4319
4320 /* fib6_info uses a nexthop that does not have fib6_nh
4321 * using the dst->dev. Should be impossible
4322 */
4323 if (!arg.match)
4324 goto out;
4325 res.nh = arg.match;
4326 } else {
4327 res.nh = res.f6i->fib6_nh;
4328 }
4329
4330 res.fib6_flags = res.f6i->fib6_flags;
4331 res.fib6_type = res.f6i->fib6_type;
4332 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4333 if (!nrt)
4334 goto out;
4335
4336 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4337 if (on_link)
4338 nrt->rt6i_flags &= ~RTF_GATEWAY;
4339
4340 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4341
4342 /* rt6_insert_exception() will take care of duplicated exceptions */
4343 if (rt6_insert_exception(nrt, &res)) {
4344 dst_release_immediate(&nrt->dst);
4345 goto out;
4346 }
4347
4348 netevent.old = &rt->dst;
4349 netevent.new = &nrt->dst;
4350 netevent.daddr = &msg->dest;
4351 netevent.neigh = neigh;
4352 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4353
4354 out:
4355 rcu_read_unlock();
4356 neigh_release(neigh);
4357 }
4358
4359 #ifdef CONFIG_IPV6_ROUTE_INFO
4360 static struct fib6_info *rt6_get_route_info(struct net *net,
4361 const struct in6_addr *prefix, int prefixlen,
4362 const struct in6_addr *gwaddr,
4363 struct net_device *dev)
4364 {
4365 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4366 int ifindex = dev->ifindex;
4367 struct fib6_node *fn;
4368 struct fib6_info *rt = NULL;
4369 struct fib6_table *table;
4370
4371 table = fib6_get_table(net, tb_id);
4372 if (!table)
4373 return NULL;
4374
4375 rcu_read_lock();
4376 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4377 if (!fn)
4378 goto out;
4379
4380 for_each_fib6_node_rt_rcu(fn) {
4381 /* these routes do not use nexthops */
4382 if (rt->nh)
4383 continue;
4384 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4385 continue;
4386 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4387 !rt->fib6_nh->fib_nh_gw_family)
4388 continue;
4389 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4390 continue;
4391 if (!fib6_info_hold_safe(rt))
4392 continue;
4393 break;
4394 }
4395 out:
4396 rcu_read_unlock();
4397 return rt;
4398 }
4399
4400 static struct fib6_info *rt6_add_route_info(struct net *net,
4401 const struct in6_addr *prefix, int prefixlen,
4402 const struct in6_addr *gwaddr,
4403 struct net_device *dev,
4404 unsigned int pref)
4405 {
4406 struct fib6_config cfg = {
4407 .fc_metric = IP6_RT_PRIO_USER,
4408 .fc_ifindex = dev->ifindex,
4409 .fc_dst_len = prefixlen,
4410 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4411 RTF_UP | RTF_PREF(pref),
4412 .fc_protocol = RTPROT_RA,
4413 .fc_type = RTN_UNICAST,
4414 .fc_nlinfo.portid = 0,
4415 .fc_nlinfo.nlh = NULL,
4416 .fc_nlinfo.nl_net = net,
4417 };
4418
4419 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4420 cfg.fc_dst = *prefix;
4421 cfg.fc_gateway = *gwaddr;
4422
4423 /* We should treat it as a default route if prefix length is 0. */
4424 if (!prefixlen)
4425 cfg.fc_flags |= RTF_DEFAULT;
4426
4427 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4428
4429 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4430 }
4431 #endif
4432
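/* Look up the RA-installed default route through @dev whose gateway is
 * @addr, taking a reference on the returned entry.
 */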
4433 struct fib6_info *rt6_get_dflt_router(struct net *net,
4434 const struct in6_addr *addr,
4435 struct net_device *dev)
4436 {
4437 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4438 struct fib6_info *rt;
4439 struct fib6_table *table;
4440
4441 table = fib6_get_table(net, tb_id);
4442 if (!table)
4443 return NULL;
4444
4445 rcu_read_lock();
4446 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4447 struct fib6_nh *nh;
4448
4449 /* RA routes do not use nexthops */
4450 if (rt->nh)
4451 continue;
4452
4453 nh = rt->fib6_nh;
4454 if (dev == nh->fib_nh_dev &&
4455 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4456 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4457 break;
4458 }
4459 if (rt && !fib6_info_hold_safe(rt))
4460 rt = NULL;
4461 rcu_read_unlock();
4462 return rt;
4463 }
4464
4465 struct fib6_info *rt6_add_dflt_router(struct net *net,
4466 const struct in6_addr *gwaddr,
4467 struct net_device *dev,
4468 unsigned int pref,
4469 u32 defrtr_usr_metric,
4470 int lifetime)
4471 {
4472 struct fib6_config cfg = {
4473 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4474 .fc_metric = defrtr_usr_metric,
4475 .fc_ifindex = dev->ifindex,
4476 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4477 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4478 .fc_protocol = RTPROT_RA,
4479 .fc_type = RTN_UNICAST,
4480 .fc_nlinfo.portid = 0,
4481 .fc_nlinfo.nlh = NULL,
4482 .fc_nlinfo.nl_net = net,
4483 .fc_expires = jiffies_to_clock_t(lifetime * HZ),
4484 };
4485
4486 cfg.fc_gateway = *gwaddr;
4487
4488 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4489 struct fib6_table *table;
4490
4491 table = fib6_get_table(dev_net(dev), cfg.fc_table);
4492 if (table)
4493 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4494 }
4495
4496 return rt6_get_dflt_router(net, gwaddr, dev);
4497 }
4498
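/* Walk the table deleting RA-learned default routers. The walk restarts
 * from the top after each deletion because the RCU read lock has to be
 * dropped around ip6_del_rt().
 */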
4499 static void __rt6_purge_dflt_routers(struct net *net,
4500 struct fib6_table *table)
4501 {
4502 struct fib6_info *rt;
4503
4504 restart:
4505 rcu_read_lock();
4506 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4507 struct net_device *dev = fib6_info_nh_dev(rt);
4508 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4509
4510 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4511 (!idev || idev->cnf.accept_ra != 2) &&
4512 fib6_info_hold_safe(rt)) {
4513 rcu_read_unlock();
4514 ip6_del_rt(net, rt, false);
4515 goto restart;
4516 }
4517 }
4518 rcu_read_unlock();
4519
4520 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4521 }
4522
4523 void rt6_purge_dflt_routers(struct net *net)
4524 {
4525 struct fib6_table *table;
4526 struct hlist_head *head;
4527 unsigned int h;
4528
4529 rcu_read_lock();
4530
4531 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4532 head = &net->ipv6.fib_table_hash[h];
4533 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4534 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4535 __rt6_purge_dflt_routers(net, table);
4536 }
4537 }
4538
4539 rcu_read_unlock();
4540 }
4541
4542 static void rtmsg_to_fib6_config(struct net *net,
4543 struct in6_rtmsg *rtmsg,
4544 struct fib6_config *cfg)
4545 {
4546 *cfg = (struct fib6_config){
4547 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4548 : RT6_TABLE_MAIN,
4549 .fc_ifindex = rtmsg->rtmsg_ifindex,
4550 .fc_metric = rtmsg->rtmsg_metric,
4551 .fc_expires = rtmsg->rtmsg_info,
4552 .fc_dst_len = rtmsg->rtmsg_dst_len,
4553 .fc_src_len = rtmsg->rtmsg_src_len,
4554 .fc_flags = rtmsg->rtmsg_flags,
4555 .fc_type = rtmsg->rtmsg_type,
4556
4557 .fc_nlinfo.nl_net = net,
4558
4559 .fc_dst = rtmsg->rtmsg_dst,
4560 .fc_src = rtmsg->rtmsg_src,
4561 .fc_gateway = rtmsg->rtmsg_gateway,
4562 };
4563 }
4564
4565 int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
4566 {
4567 struct fib6_config cfg;
4568 int err;
4569
4570 if (cmd != SIOCADDRT && cmd != SIOCDELRT)
4571 return -EINVAL;
4572 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4573 return -EPERM;
4574
4575 rtmsg_to_fib6_config(net, rtmsg, &cfg);
4576
4577 switch (cmd) {
4578 case SIOCADDRT:
4579 		/* Only apply the default fc_metric when adding a route */
4580 if (cfg.fc_metric == 0)
4581 cfg.fc_metric = IP6_RT_PRIO_USER;
4582 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4583 break;
4584 case SIOCDELRT:
4585 err = ip6_route_del(&cfg, NULL);
4586 break;
4587 }
4588
4589 return err;
4590 }
4591
4592 /*
4593 * Drop the packet on the floor
4594 */
4595
4596 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4597 {
4598 struct dst_entry *dst = skb_dst(skb);
4599 struct net_device *dev = dst_dev(dst);
4600 struct net *net = dev_net(dev);
4601 struct inet6_dev *idev;
4602 SKB_DR(reason);
4603 int type;
4604
4605 if (netif_is_l3_master(skb->dev) ||
4606 dev == net->loopback_dev)
4607 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4608 else
4609 idev = ip6_dst_idev(dst);
4610
4611 switch (ipstats_mib_noroutes) {
4612 case IPSTATS_MIB_INNOROUTES:
4613 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4614 if (type == IPV6_ADDR_ANY) {
4615 SKB_DR_SET(reason, IP_INADDRERRORS);
4616 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4617 break;
4618 }
4619 SKB_DR_SET(reason, IP_INNOROUTES);
4620 fallthrough;
4621 case IPSTATS_MIB_OUTNOROUTES:
4622 SKB_DR_OR(reason, IP_OUTNOROUTES);
4623 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4624 break;
4625 }
4626
4627 /* Start over by dropping the dst for l3mdev case */
4628 if (netif_is_l3_master(skb->dev))
4629 skb_dst_drop(skb);
4630
4631 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4632 kfree_skb_reason(skb, reason);
4633 return 0;
4634 }
4635
4636 static int ip6_pkt_discard(struct sk_buff *skb)
4637 {
4638 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4639 }
4640
4641 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4642 {
4643 skb->dev = skb_dst_dev(skb);
4644 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4645 }
4646
4647 static int ip6_pkt_prohibit(struct sk_buff *skb)
4648 {
4649 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4650 }
4651
4652 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4653 {
4654 skb->dev = skb_dst_dev(skb);
4655 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4656 }
4657
4658 /*
4659 * Allocate a dst for local (unicast / anycast) address.
4660 */
4661
4662 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4663 struct inet6_dev *idev,
4664 const struct in6_addr *addr,
4665 bool anycast, gfp_t gfp_flags,
4666 struct netlink_ext_ack *extack)
4667 {
4668 struct fib6_config cfg = {
4669 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4670 .fc_ifindex = idev->dev->ifindex,
4671 .fc_flags = RTF_UP | RTF_NONEXTHOP,
4672 .fc_dst = *addr,
4673 .fc_dst_len = 128,
4674 .fc_protocol = RTPROT_KERNEL,
4675 .fc_nlinfo.nl_net = net,
4676 .fc_ignore_dev_down = true,
4677 };
4678 struct fib6_info *f6i;
4679 int err;
4680
4681 if (anycast) {
4682 cfg.fc_type = RTN_ANYCAST;
4683 cfg.fc_flags |= RTF_ANYCAST;
4684 } else {
4685 cfg.fc_type = RTN_LOCAL;
4686 cfg.fc_flags |= RTF_LOCAL;
4687 }
4688
4689 f6i = ip6_route_info_create(&cfg, gfp_flags, extack);
4690 if (IS_ERR(f6i))
4691 return f6i;
4692
4693 err = ip6_route_info_create_nh(f6i, &cfg, gfp_flags, extack);
4694 if (err)
4695 return ERR_PTR(err);
4696
4697 f6i->dst_nocount = true;
4698
4699 if (!anycast &&
4700 (READ_ONCE(net->ipv6.devconf_all->disable_policy) ||
4701 READ_ONCE(idev->cnf.disable_policy)))
4702 f6i->dst_nopolicy = true;
4703
4704 return f6i;
4705 }
4706
4707 /* remove deleted ip from prefsrc entries */
4708 struct arg_dev_net_ip {
4709 struct net *net;
4710 struct in6_addr *addr;
4711 };
4712
4713 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4714 {
4715 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4716 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4717
4718 if (!rt->nh &&
4719 rt != net->ipv6.fib6_null_entry &&
4720 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr) &&
4721 !ipv6_chk_addr(net, addr, rt->fib6_nh->fib_nh_dev, 0)) {
4722 spin_lock_bh(&rt6_exception_lock);
4723 /* remove prefsrc entry */
4724 rt->fib6_prefsrc.plen = 0;
4725 spin_unlock_bh(&rt6_exception_lock);
4726 }
4727 return 0;
4728 }
4729
4730 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4731 {
4732 struct net *net = dev_net(ifp->idev->dev);
4733 struct arg_dev_net_ip adni = {
4734 .net = net,
4735 .addr = &ifp->addr,
4736 };
4737 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4738 }
4739
4740 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4741
4742 /* Remove routers and update dst entries when a gateway turns into a host. */
4743 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4744 {
4745 struct in6_addr *gateway = (struct in6_addr *)arg;
4746 struct fib6_nh *nh;
4747
4748 /* RA routes do not use nexthops */
4749 if (rt->nh)
4750 return 0;
4751
4752 nh = rt->fib6_nh;
4753 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4754 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4755 return -1;
4756
4757 /* Further clean up cached routes in exception table.
4758 * This is needed because cached route may have a different
4759 * gateway than its 'parent' in the case of an ip redirect.
4760 */
4761 fib6_nh_exceptions_clean_tohost(nh, gateway);
4762
4763 return 0;
4764 }
4765
4766 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4767 {
4768 fib6_clean_all(net, fib6_clean_tohost, gateway);
4769 }
4770
4771 struct arg_netdev_event {
4772 const struct net_device *dev;
4773 union {
4774 unsigned char nh_flags;
4775 unsigned long event;
4776 };
4777 };
4778
4779 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4780 {
4781 struct fib6_info *iter;
4782 struct fib6_node *fn;
4783
4784 fn = rcu_dereference_protected(rt->fib6_node,
4785 lockdep_is_held(&rt->fib6_table->tb6_lock));
4786 iter = rcu_dereference_protected(fn->leaf,
4787 lockdep_is_held(&rt->fib6_table->tb6_lock));
4788 while (iter) {
4789 if (iter->fib6_metric == rt->fib6_metric &&
4790 rt6_qualify_for_ecmp(iter))
4791 return iter;
4792 iter = rcu_dereference_protected(iter->fib6_next,
4793 lockdep_is_held(&rt->fib6_table->tb6_lock));
4794 }
4795
4796 return NULL;
4797 }
4798
4799 /* only called for fib entries with builtin fib6_nh */
4800 static bool rt6_is_dead(const struct fib6_info *rt)
4801 {
4802 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4803 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4804 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4805 return true;
4806
4807 return false;
4808 }
4809
4810 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4811 {
4812 struct fib6_info *iter;
4813 int total = 0;
4814
4815 if (!rt6_is_dead(rt))
4816 total += rt->fib6_nh->fib_nh_weight;
4817
4818 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4819 if (!rt6_is_dead(iter))
4820 total += iter->fib6_nh->fib_nh_weight;
4821 }
4822
4823 return total;
4824 }
4825
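/* Each live nexthop is assigned the upper bound of its slice of the
 * 31-bit hash space, proportional to the running weight sum; dead
 * nexthops get -1 and are never selected. E.g. with weights 1 and 2
 * (total 3), the bounds come out as 0x2AAAAAAA and 0x7FFFFFFF.
 */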
4826 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4827 {
4828 int upper_bound = -1;
4829
4830 if (!rt6_is_dead(rt)) {
4831 *weight += rt->fib6_nh->fib_nh_weight;
4832 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4833 total) - 1;
4834 }
4835 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4836 }
4837
4838 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4839 {
4840 struct fib6_info *iter;
4841 int weight = 0;
4842
4843 rt6_upper_bound_set(rt, &weight, total);
4844
4845 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4846 rt6_upper_bound_set(iter, &weight, total);
4847 }
4848
4849 void rt6_multipath_rebalance(struct fib6_info *rt)
4850 {
4851 struct fib6_info *first;
4852 int total;
4853
4854 	/* If the entire multipath route was marked for flushing, there
4855 	 * is no need to rebalance upon the removal of every sibling
4856 	 * route.
4857 	 */
4858 if (!rt->fib6_nsiblings || rt->should_flush)
4859 return;
4860
4861 /* During lookup routes are evaluated in order, so we need to
4862 * make sure upper bounds are assigned from the first sibling
4863 * onwards.
4864 */
4865 first = rt6_multipath_first_sibling(rt);
4866 if (WARN_ON_ONCE(!first))
4867 return;
4868
4869 total = rt6_multipath_total_weight(first);
4870 rt6_multipath_upper_bound_set(first, total);
4871 }
4872
4873 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4874 {
4875 const struct arg_netdev_event *arg = p_arg;
4876 struct net *net = dev_net(arg->dev);
4877
4878 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4879 rt->fib6_nh->fib_nh_dev == arg->dev) {
4880 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4881 fib6_update_sernum_upto_root(net, rt);
4882 rt6_multipath_rebalance(rt);
4883 }
4884
4885 return 0;
4886 }
4887
4888 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4889 {
4890 struct arg_netdev_event arg = {
4891 .dev = dev,
4892 {
4893 .nh_flags = nh_flags,
4894 },
4895 };
4896
4897 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4898 arg.nh_flags |= RTNH_F_LINKDOWN;
4899
4900 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4901 }
4902
4903 /* only called for fib entries with inline fib6_nh */
4904 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4905 const struct net_device *dev)
4906 {
4907 struct fib6_info *iter;
4908
4909 if (rt->fib6_nh->fib_nh_dev == dev)
4910 return true;
4911 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4912 if (iter->fib6_nh->fib_nh_dev == dev)
4913 return true;
4914
4915 return false;
4916 }
4917
4918 static void rt6_multipath_flush(struct fib6_info *rt)
4919 {
4920 struct fib6_info *iter;
4921
4922 rt->should_flush = 1;
4923 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4924 iter->should_flush = 1;
4925 }
4926
4927 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4928 const struct net_device *down_dev)
4929 {
4930 struct fib6_info *iter;
4931 unsigned int dead = 0;
4932
4933 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4934 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4935 dead++;
4936 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4937 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4938 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4939 dead++;
4940
4941 return dead;
4942 }
4943
4944 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4945 const struct net_device *dev,
4946 unsigned char nh_flags)
4947 {
4948 struct fib6_info *iter;
4949
4950 if (rt->fib6_nh->fib_nh_dev == dev)
4951 rt->fib6_nh->fib_nh_flags |= nh_flags;
4952 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4953 if (iter->fib6_nh->fib_nh_dev == dev)
4954 iter->fib6_nh->fib_nh_flags |= nh_flags;
4955 }
4956
4957 /* called with write lock held for table with rt */
4958 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4959 {
4960 const struct arg_netdev_event *arg = p_arg;
4961 const struct net_device *dev = arg->dev;
4962 struct net *net = dev_net(dev);
4963
4964 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4965 return 0;
4966
4967 switch (arg->event) {
4968 case NETDEV_UNREGISTER:
4969 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4970 case NETDEV_DOWN:
4971 if (rt->should_flush)
4972 return -1;
4973 if (!rt->fib6_nsiblings)
4974 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4975 if (rt6_multipath_uses_dev(rt, dev)) {
4976 unsigned int count;
4977
4978 count = rt6_multipath_dead_count(rt, dev);
4979 if (rt->fib6_nsiblings + 1 == count) {
4980 rt6_multipath_flush(rt);
4981 return -1;
4982 }
4983 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4984 RTNH_F_LINKDOWN);
4985 fib6_update_sernum(net, rt);
4986 rt6_multipath_rebalance(rt);
4987 }
4988 return -2;
4989 case NETDEV_CHANGE:
4990 if (rt->fib6_nh->fib_nh_dev != dev ||
4991 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4992 break;
4993 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4994 rt6_multipath_rebalance(rt);
4995 break;
4996 }
4997
4998 return 0;
4999 }
5000
5001 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
5002 {
5003 struct arg_netdev_event arg = {
5004 .dev = dev,
5005 {
5006 .event = event,
5007 },
5008 };
5009 struct net *net = dev_net(dev);
5010
5011 if (net->ipv6.sysctl.skip_notify_on_dev_down)
5012 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
5013 else
5014 fib6_clean_all(net, fib6_ifdown, &arg);
5015 }
5016
5017 void rt6_disable_ip(struct net_device *dev, unsigned long event)
5018 {
5019 rt6_sync_down_dev(dev, event);
5020 rt6_uncached_list_flush_dev(dev);
5021 neigh_ifdown(&nd_tbl, dev);
5022 }
5023
5024 struct rt6_mtu_change_arg {
5025 struct net_device *dev;
5026 unsigned int mtu;
5027 struct fib6_info *f6i;
5028 };
5029
5030 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
5031 {
5032 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
5033 struct fib6_info *f6i = arg->f6i;
5034
5035 	/* After an administrative MTU increase there is no way to discover
5036 	 * an IPv6 PMTU increase, so the PMTU has to be updated here.
5037 	 * Since RFC 1981 doesn't cover administrative MTU increases,
5038 	 * updating the PMTU on increase is a MUST (e.g. for jumbo frames).
5039 	 */
5040 if (nh->fib_nh_dev == arg->dev) {
5041 struct inet6_dev *idev = __in6_dev_get(arg->dev);
5042 u32 mtu = f6i->fib6_pmtu;
5043
5044 if (mtu >= arg->mtu ||
5045 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
5046 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
5047
5048 spin_lock_bh(&rt6_exception_lock);
5049 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
5050 spin_unlock_bh(&rt6_exception_lock);
5051 }
5052
5053 return 0;
5054 }
5055
5056 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
5057 {
5058 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
5059 struct inet6_dev *idev;
5060
5061 	/* In IPv6, PMTU discovery is not optional,
5062 	 * so an RTAX_MTU lock cannot disable it.
5063 	 * We still use the lock to block changes
5064 	 * caused by addrconf/ndisc.
5065 	 */
5066
5067 idev = __in6_dev_get(arg->dev);
5068 if (!idev)
5069 return 0;
5070
5071 if (fib6_metric_locked(f6i, RTAX_MTU))
5072 return 0;
5073
5074 arg->f6i = f6i;
5075 if (f6i->nh) {
5076 /* fib6_nh_mtu_change only returns 0, so this is safe */
5077 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
5078 arg);
5079 }
5080
5081 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
5082 }
5083
5084 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
5085 {
5086 struct rt6_mtu_change_arg arg = {
5087 .dev = dev,
5088 .mtu = mtu,
5089 };
5090
5091 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
5092 }
5093
5094 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
5095 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
5096 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
5097 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
5098 [RTA_OIF] = { .type = NLA_U32 },
5099 [RTA_IIF] = { .type = NLA_U32 },
5100 [RTA_PRIORITY] = { .type = NLA_U32 },
5101 [RTA_METRICS] = { .type = NLA_NESTED },
5102 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
5103 [RTA_PREF] = { .type = NLA_U8 },
5104 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
5105 [RTA_ENCAP] = { .type = NLA_NESTED },
5106 [RTA_EXPIRES] = { .type = NLA_U32 },
5107 [RTA_UID] = { .type = NLA_U32 },
5108 [RTA_MARK] = { .type = NLA_U32 },
5109 [RTA_TABLE] = { .type = NLA_U32 },
5110 [RTA_IP_PROTO] = { .type = NLA_U8 },
5111 [RTA_SPORT] = { .type = NLA_U16 },
5112 [RTA_DPORT] = { .type = NLA_U16 },
5113 [RTA_NH_ID] = { .type = NLA_U32 },
5114 [RTA_FLOWLABEL] = { .type = NLA_BE32 },
5115 };
5116
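/* Pre-validate an RTA_MULTIPATH attribute: for a new route, a nexthop id
 * must not be combined with it and every hop needs a gateway (either
 * per-hop or inherited from the top-level config); the encap types of
 * all hops are validated as well.
 */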
5117 static int rtm_to_fib6_multipath_config(struct fib6_config *cfg,
5118 struct netlink_ext_ack *extack,
5119 bool newroute)
5120 {
5121 struct rtnexthop *rtnh;
5122 int remaining;
5123
5124 remaining = cfg->fc_mp_len;
5125 rtnh = (struct rtnexthop *)cfg->fc_mp;
5126
5127 if (!rtnh_ok(rtnh, remaining)) {
5128 NL_SET_ERR_MSG(extack, "Invalid nexthop configuration - no valid nexthops");
5129 return -EINVAL;
5130 }
5131
5132 do {
5133 bool has_gateway = cfg->fc_flags & RTF_GATEWAY;
5134 int attrlen = rtnh_attrlen(rtnh);
5135
5136 if (attrlen > 0) {
5137 struct nlattr *nla, *attrs;
5138
5139 attrs = rtnh_attrs(rtnh);
5140 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5141 if (nla) {
5142 if (nla_len(nla) < sizeof(cfg->fc_gateway)) {
5143 NL_SET_ERR_MSG(extack,
5144 "Invalid IPv6 address in RTA_GATEWAY");
5145 return -EINVAL;
5146 }
5147
5148 has_gateway = true;
5149 }
5150 }
5151
5152 if (newroute && (cfg->fc_nh_id || !has_gateway)) {
5153 NL_SET_ERR_MSG(extack,
5154 "Device only routes can not be added for IPv6 using the multipath API.");
5155 return -EINVAL;
5156 }
5157
5158 rtnh = rtnh_next(rtnh, &remaining);
5159 } while (rtnh_ok(rtnh, remaining));
5160
5161 return lwtunnel_valid_encap_type_attr(cfg->fc_mp, cfg->fc_mp_len, extack);
5162 }
5163
5164 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
5165 struct fib6_config *cfg,
5166 struct netlink_ext_ack *extack)
5167 {
5168 bool newroute = nlh->nlmsg_type == RTM_NEWROUTE;
5169 struct nlattr *tb[RTA_MAX+1];
5170 struct rtmsg *rtm;
5171 unsigned int pref;
5172 int err;
5173
5174 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5175 rtm_ipv6_policy, extack);
5176 if (err < 0)
5177 goto errout;
5178
5179 err = -EINVAL;
5180 rtm = nlmsg_data(nlh);
5181
5182 if (rtm->rtm_tos) {
5183 NL_SET_ERR_MSG(extack,
5184 "Invalid dsfield (tos): option not available for IPv6");
5185 goto errout;
5186 }
5187
5188 if (tb[RTA_FLOWLABEL]) {
5189 NL_SET_ERR_MSG_ATTR(extack, tb[RTA_FLOWLABEL],
5190 "Flow label cannot be specified for this operation");
5191 goto errout;
5192 }
5193
5194 *cfg = (struct fib6_config){
5195 .fc_table = rtm->rtm_table,
5196 .fc_dst_len = rtm->rtm_dst_len,
5197 .fc_src_len = rtm->rtm_src_len,
5198 .fc_flags = RTF_UP,
5199 .fc_protocol = rtm->rtm_protocol,
5200 .fc_type = rtm->rtm_type,
5201
5202 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
5203 .fc_nlinfo.nlh = nlh,
5204 .fc_nlinfo.nl_net = sock_net(skb->sk),
5205 };
5206
5207 if (rtm->rtm_type == RTN_UNREACHABLE ||
5208 rtm->rtm_type == RTN_BLACKHOLE ||
5209 rtm->rtm_type == RTN_PROHIBIT ||
5210 rtm->rtm_type == RTN_THROW)
5211 cfg->fc_flags |= RTF_REJECT;
5212
5213 if (rtm->rtm_type == RTN_LOCAL)
5214 cfg->fc_flags |= RTF_LOCAL;
5215
5216 if (rtm->rtm_flags & RTM_F_CLONED)
5217 cfg->fc_flags |= RTF_CACHE;
5218
5219 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
5220
5221 if (tb[RTA_NH_ID]) {
5222 if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
5223 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
5224 NL_SET_ERR_MSG(extack,
5225 "Nexthop specification and nexthop id are mutually exclusive");
5226 goto errout;
5227 }
5228 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
5229 }
5230
5231 if (tb[RTA_GATEWAY]) {
5232 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
5233 cfg->fc_flags |= RTF_GATEWAY;
5234 }
5235 if (tb[RTA_VIA]) {
5236 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
5237 goto errout;
5238 }
5239
5240 if (tb[RTA_DST]) {
5241 int plen = (rtm->rtm_dst_len + 7) >> 3;
5242
5243 if (nla_len(tb[RTA_DST]) < plen)
5244 goto errout;
5245
5246 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
5247 }
5248
5249 if (tb[RTA_SRC]) {
5250 int plen = (rtm->rtm_src_len + 7) >> 3;
5251
5252 if (nla_len(tb[RTA_SRC]) < plen)
5253 goto errout;
5254
5255 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
5256 }
5257
5258 if (tb[RTA_PREFSRC])
5259 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
5260
5261 if (tb[RTA_OIF])
5262 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
5263
5264 if (tb[RTA_PRIORITY])
5265 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
5266
5267 if (tb[RTA_METRICS]) {
5268 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
5269 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
5270 }
5271
5272 if (tb[RTA_TABLE])
5273 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
5274
5275 if (tb[RTA_MULTIPATH]) {
5276 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
5277 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
5278
5279 err = rtm_to_fib6_multipath_config(cfg, extack, newroute);
5280 if (err < 0)
5281 goto errout;
5282 }
5283
5284 if (tb[RTA_PREF]) {
5285 pref = nla_get_u8(tb[RTA_PREF]);
5286 if (pref != ICMPV6_ROUTER_PREF_LOW &&
5287 pref != ICMPV6_ROUTER_PREF_HIGH)
5288 pref = ICMPV6_ROUTER_PREF_MEDIUM;
5289 cfg->fc_flags |= RTF_PREF(pref);
5290 }
5291
5292 if (tb[RTA_ENCAP])
5293 cfg->fc_encap = tb[RTA_ENCAP];
5294
5295 if (tb[RTA_ENCAP_TYPE]) {
5296 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
5297
5298 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
5299 if (err < 0)
5300 goto errout;
5301 }
5302
5303 if (tb[RTA_EXPIRES]) {
5304 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
5305
5306 if (addrconf_finite_timeout(timeout)) {
5307 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
5308 cfg->fc_flags |= RTF_EXPIRES;
5309 }
5310 }
5311
5312 err = 0;
5313 errout:
5314 return err;
5315 }
5316
5317 struct rt6_nh {
5318 struct fib6_info *fib6_info;
5319 struct fib6_config r_cfg;
5320 struct list_head list;
5321 };
5322
5323 static int ip6_route_info_append(struct list_head *rt6_nh_list,
5324 struct fib6_info *rt,
5325 struct fib6_config *r_cfg)
5326 {
5327 struct rt6_nh *nh;
5328
5329 list_for_each_entry(nh, rt6_nh_list, list) {
5330 /* check if fib6_info already exists */
5331 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5332 return -EEXIST;
5333 }
5334
5335 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
5336 if (!nh)
5337 return -ENOMEM;
5338
5339 nh->fib6_info = rt;
5340 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5341 list_add_tail(&nh->list, rt6_nh_list);
5342
5343 return 0;
5344 }
5345
5346 static void ip6_route_mpath_notify(struct fib6_info *rt,
5347 struct fib6_info *rt_last,
5348 struct nl_info *info,
5349 __u16 nlflags)
5350 {
5351 	/* if this is an APPEND route, then rt points to the first route
5352 	 * inserted and rt_last points to the last route inserted. Userspace
5353 	 * wants a consistent dump of the route which starts at the first
5354 	 * nexthop. Since sibling routes are always added at the end of
5355 	 * the list, find the first sibling of the last route appended.
5356 	 */
5357 rcu_read_lock();
5358
5359 if ((nlflags & NLM_F_APPEND) && rt_last &&
5360 READ_ONCE(rt_last->fib6_nsiblings)) {
5361 rt = list_first_or_null_rcu(&rt_last->fib6_siblings,
5362 struct fib6_info,
5363 fib6_siblings);
5364 }
5365
5366 if (rt)
5367 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5368
5369 rcu_read_unlock();
5370 }
5371
5372 static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
5373 {
5374 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
5375 bool should_notify = false;
5376 struct fib6_info *leaf;
5377 struct fib6_node *fn;
5378
5379 rcu_read_lock();
5380 fn = rcu_dereference(rt->fib6_node);
5381 if (!fn)
5382 goto out;
5383
5384 leaf = rcu_dereference(fn->leaf);
5385 if (!leaf)
5386 goto out;
5387
5388 if (rt == leaf ||
5389 (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
5390 rt6_qualify_for_ecmp(leaf)))
5391 should_notify = true;
5392 out:
5393 rcu_read_unlock();
5394
5395 return should_notify;
5396 }
5397
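/* Add a multipath route: build one fib6_info per rtnexthop, insert them
 * one by one so they link up as siblings, and on failure notify and then
 * delete the hops already installed. This is the path taken by e.g.
 * "ip -6 route add 2001:db8::/64 nexthop via fe80::1 dev eth0
 *  nexthop via fe80::2 dev eth1" (illustrative iproute2 invocation).
 */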
5398 static int ip6_route_multipath_add(struct fib6_config *cfg,
5399 struct netlink_ext_ack *extack)
5400 {
5401 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5402 struct nl_info *info = &cfg->fc_nlinfo;
5403 struct rt6_nh *nh, *nh_safe;
5404 struct fib6_config r_cfg;
5405 struct rtnexthop *rtnh;
5406 LIST_HEAD(rt6_nh_list);
5407 struct rt6_nh *err_nh;
5408 struct fib6_info *rt;
5409 __u16 nlflags;
5410 int remaining;
5411 int attrlen;
5412 int replace;
5413 int nhn = 0;
5414 int err;
5415
5416 err = fib6_config_validate(cfg, extack);
5417 if (err)
5418 return err;
5419
5420 replace = (cfg->fc_nlinfo.nlh &&
5421 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5422
5423 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5424 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5425 nlflags |= NLM_F_APPEND;
5426
5427 remaining = cfg->fc_mp_len;
5428 rtnh = (struct rtnexthop *)cfg->fc_mp;
5429
5430 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
5431 * fib6_info structs per nexthop
5432 */
5433 while (rtnh_ok(rtnh, remaining)) {
5434 memcpy(&r_cfg, cfg, sizeof(*cfg));
5435 if (rtnh->rtnh_ifindex)
5436 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5437
5438 attrlen = rtnh_attrlen(rtnh);
5439 if (attrlen > 0) {
5440 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5441
5442 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5443 if (nla) {
5444 r_cfg.fc_gateway = nla_get_in6_addr(nla);
5445 r_cfg.fc_flags |= RTF_GATEWAY;
5446 }
5447
5448 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5449 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5450 if (nla)
5451 r_cfg.fc_encap_type = nla_get_u16(nla);
5452 }
5453
5454 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5455 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5456 if (IS_ERR(rt)) {
5457 err = PTR_ERR(rt);
5458 rt = NULL;
5459 goto cleanup;
5460 }
5461
5462 err = ip6_route_info_create_nh(rt, &r_cfg, GFP_KERNEL, extack);
5463 if (err) {
5464 rt = NULL;
5465 goto cleanup;
5466 }
5467
5468 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5469
5470 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
5471 if (err) {
5472 fib6_info_release(rt);
5473 goto cleanup;
5474 }
5475
5476 rtnh = rtnh_next(rtnh, &remaining);
5477 }
5478
5479 /* for add and replace send one notification with all nexthops.
5480 * Skip the notification in fib6_add_rt2node and send one with
5481 * the full route when done
5482 */
5483 info->skip_notify = 1;
5484
5485 /* For add and replace, send one notification with all nexthops. For
5486 * append, send one notification with all appended nexthops.
5487 */
5488 info->skip_notify_kernel = 1;
5489
5490 err_nh = NULL;
5491 list_for_each_entry(nh, &rt6_nh_list, list) {
5492 err = __ip6_ins_rt(nh->fib6_info, info, extack);
5493
5494 if (err) {
5495 if (replace && nhn)
5496 NL_SET_ERR_MSG_MOD(extack,
5497 "multipath route replace failed (check consistency of installed routes)");
5498 err_nh = nh;
5499 goto add_errout;
5500 }
5501 /* save reference to last route successfully inserted */
5502 rt_last = nh->fib6_info;
5503
5504 /* save reference to first route for notification */
5505 if (!rt_notif)
5506 rt_notif = nh->fib6_info;
5507
5508 		/* Because each route is added like a single route, we remove
5509 		 * these flags after the first nexthop. If there is a collision,
5510 		 * we have already failed to add the first nexthop:
5511 		 * fib6_add_rt2node() has rejected it. When replacing, the old
5512 		 * nexthops have been replaced by the first new one; the rest
5513 		 * should be appended to it.
5514 		 */
5515 if (cfg->fc_nlinfo.nlh) {
5516 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5517 NLM_F_REPLACE);
5518 cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5519 }
5520 nhn++;
5521 }
5522
5523 /* An in-kernel notification should only be sent in case the new
5524 * multipath route is added as the first route in the node, or if
5525 * it was appended to it. We pass 'rt_notif' since it is the first
5526 * sibling and might allow us to skip some checks in the replace case.
5527 */
5528 if (ip6_route_mpath_should_notify(rt_notif)) {
5529 enum fib_event_type fib_event;
5530
5531 if (rt_notif->fib6_nsiblings != nhn - 1)
5532 fib_event = FIB_EVENT_ENTRY_APPEND;
5533 else
5534 fib_event = FIB_EVENT_ENTRY_REPLACE;
5535
5536 err = call_fib6_multipath_entry_notifiers(info->nl_net,
5537 fib_event, rt_notif,
5538 nhn - 1, extack);
5539 if (err) {
5540 /* Delete all the siblings that were just added */
5541 err_nh = NULL;
5542 goto add_errout;
5543 }
5544 }
5545
5546 /* success ... tell user about new route */
5547 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5548 goto cleanup;
5549
5550 add_errout:
5551 /* send notification for routes that were added so that
5552 * the delete notifications sent by ip6_route_del are
5553 * coherent
5554 */
5555 if (rt_notif)
5556 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5557
5558 /* Delete routes that were already added */
5559 list_for_each_entry(nh, &rt6_nh_list, list) {
5560 if (err_nh == nh)
5561 break;
5562 ip6_route_del(&nh->r_cfg, extack);
5563 }
5564
5565 cleanup:
5566 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, list) {
5567 fib6_info_release(nh->fib6_info);
5568 list_del(&nh->list);
5569 kfree(nh);
5570 }
5571
5572 return err;
5573 }
5574
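/* Delete a multipath route hop by hop; errors are remembered but do not
 * stop the loop, and the last error (if any) is returned.
 */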
5575 static int ip6_route_multipath_del(struct fib6_config *cfg,
5576 struct netlink_ext_ack *extack)
5577 {
5578 struct fib6_config r_cfg;
5579 struct rtnexthop *rtnh;
5580 int last_err = 0;
5581 int remaining;
5582 int attrlen;
5583 int err;
5584
5585 remaining = cfg->fc_mp_len;
5586 rtnh = (struct rtnexthop *)cfg->fc_mp;
5587
5588 /* Parse a Multipath Entry */
5589 while (rtnh_ok(rtnh, remaining)) {
5590 memcpy(&r_cfg, cfg, sizeof(*cfg));
5591 if (rtnh->rtnh_ifindex)
5592 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5593
5594 attrlen = rtnh_attrlen(rtnh);
5595 if (attrlen > 0) {
5596 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5597
5598 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5599 if (nla) {
5600 r_cfg.fc_gateway = nla_get_in6_addr(nla);
5601 r_cfg.fc_flags |= RTF_GATEWAY;
5602 }
5603 }
5604
5605 err = ip6_route_del(&r_cfg, extack);
5606 if (err)
5607 last_err = err;
5608
5609 rtnh = rtnh_next(rtnh, &remaining);
5610 }
5611
5612 return last_err;
5613 }
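/* Illustrative example (editorial, hypothetical prefix and gateways):
 * a request such as
 *
 *   ip -6 route del 2001:db8::/64 \
 *           nexthop via fe80::1 dev eth0 \
 *           nexthop via fe80::2 dev eth1
 *
 * carries an RTA_MULTIPATH blob; the loop above peels off one rtnexthop
 * at a time and calls ip6_route_del() per entry. Errors do not abort
 * the walk; the last error seen is returned.
 */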
5614
5615 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5616 struct netlink_ext_ack *extack)
5617 {
5618 struct fib6_config cfg;
5619 int err;
5620
5621 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5622 if (err < 0)
5623 return err;
5624
5625 if (cfg.fc_nh_id) {
5626 rcu_read_lock();
5627 err = !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id);
5628 rcu_read_unlock();
5629
5630 if (err) {
5631 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5632 return -EINVAL;
5633 }
5634 }
5635
5636 if (cfg.fc_mp) {
5637 return ip6_route_multipath_del(&cfg, extack);
5638 } else {
5639 cfg.fc_delete_all_nh = 1;
5640 return ip6_route_del(&cfg, extack);
5641 }
5642 }
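/* Editorial note: when the delete request names a nexthop object
 * (RTA_NH_ID), its existence is only probed under RCU here; the point of
 * this early check is to give a clear extack error ("Nexthop id does not
 * exist") before the delete is attempted.
 */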
5643
5644 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5645 struct netlink_ext_ack *extack)
5646 {
5647 struct fib6_config cfg;
5648 int err;
5649
5650 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5651 if (err < 0)
5652 return err;
5653
5654 if (cfg.fc_metric == 0)
5655 cfg.fc_metric = IP6_RT_PRIO_USER;
5656
5657 if (cfg.fc_mp)
5658 return ip6_route_multipath_add(&cfg, extack);
5659 else
5660 return ip6_route_add(&cfg, GFP_KERNEL, extack);
5661 }
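/* Editorial note: RTM_NEWROUTE requests are dispatched on the presence
 * of RTA_MULTIPATH (cfg.fc_mp). E.g. (hypothetical prefix and gateways):
 *
 *   ip -6 route add 2001:db8::/64 \
 *           nexthop via fe80::1 dev eth0 \
 *           nexthop via fe80::2 dev eth1
 *
 * takes the multipath path, while a plain single-nexthop add goes
 * through ip6_route_add(). A zero priority is first replaced by the
 * IP6_RT_PRIO_USER default (1024).
 */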
5662
5663 /* add the overhead of this fib6_nh to nexthop_len */
5664 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5665 {
5666 int *nexthop_len = arg;
5667
5668 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5669 + NLA_ALIGN(sizeof(struct rtnexthop))
5670 + nla_total_size(16); /* RTA_GATEWAY */
5671
5672 if (nh->fib_nh_lws) {
5673 /* RTA_ENCAP */
5674 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5675 /* RTA_ENCAP_TYPE (u16) */
5676 *nexthop_len += nla_total_size(2);
5677 }
5678
5679 return 0;
5680 }
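/* Editorial note: this walker accumulates a worst-case per-nexthop
 * contribution (rtnexthop header, RTA_GATEWAY, plus the optional
 * lwtunnel RTA_ENCAP/RTA_ENCAP_TYPE attributes) so that rt6_nlmsg_size()
 * can size the netlink skb before rt6_fill_node() runs.
 */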
5681
5682 static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5683 {
5684 struct fib6_info *sibling;
5685 struct fib6_nh *nh;
5686 int nexthop_len;
5687
5688 if (f6i->nh) {
5689 nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5690 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5691 &nexthop_len);
5692 goto common;
5693 }
5694
5695 rcu_read_lock();
5696 retry:
5697 nh = f6i->fib6_nh;
5698 nexthop_len = 0;
5699 if (READ_ONCE(f6i->fib6_nsiblings)) {
5700 rt6_nh_nlmsg_size(nh, &nexthop_len);
5701
5702 list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
5703 fib6_siblings) {
5704 rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
5705 if (!READ_ONCE(f6i->fib6_nsiblings))
5706 goto retry;
5707 }
5708 }
5709 rcu_read_unlock();
5710 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5711 common:
5712 return NLMSG_ALIGN(sizeof(struct rtmsg))
5713 + nla_total_size(16) /* RTA_SRC */
5714 + nla_total_size(16) /* RTA_DST */
5715 + nla_total_size(16) /* RTA_GATEWAY */
5716 + nla_total_size(16) /* RTA_PREFSRC */
5717 + nla_total_size(4) /* RTA_TABLE */
5718 + nla_total_size(4) /* RTA_IIF */
5719 + nla_total_size(4) /* RTA_OIF */
5720 + nla_total_size(4) /* RTA_PRIORITY */
5721 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5722 + nla_total_size(sizeof(struct rta_cacheinfo))
5723 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5724 + nla_total_size(1) /* RTA_PREF */
5725 + nexthop_len;
5726 }
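/* Editorial note: the size computed above is an estimate taken under
 * RCU; the sibling list can still change before the message is actually
 * filled in. inet6_rt_notify() compensates by retrying with a doubled
 * buffer when rt6_fill_node() returns -EMSGSIZE.
 */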
5727
5728 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5729 unsigned char *flags)
5730 {
5731 if (nexthop_is_multipath(nh)) {
5732 struct nlattr *mp;
5733
5734 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5735 if (!mp)
5736 goto nla_put_failure;
5737
5738 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5739 goto nla_put_failure;
5740
5741 nla_nest_end(skb, mp);
5742 } else {
5743 struct fib6_nh *fib6_nh;
5744
5745 fib6_nh = nexthop_fib6_nh(nh);
5746 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5747 flags, false) < 0)
5748 goto nla_put_failure;
5749 }
5750
5751 return 0;
5752
5753 nla_put_failure:
5754 return -EMSGSIZE;
5755 }
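/* Editorial note: for a nexthop object, compat output mirrors the legacy
 * encodings: a multipath group is nested under RTA_MULTIPATH via
 * nexthop_mpath_fill_node(), while a single nexthop is emitted inline
 * through fib_nexthop_info().
 */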
5756
5757 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5758 struct fib6_info *rt, struct dst_entry *dst,
5759 struct in6_addr *dest, struct in6_addr *src,
5760 int iif, int type, u32 portid, u32 seq,
5761 unsigned int flags)
5762 {
5763 struct rt6_info *rt6 = dst_rt6_info(dst);
5764 struct rt6key *rt6_dst, *rt6_src;
5765 u32 *pmetrics, table, rt6_flags;
5766 unsigned char nh_flags = 0;
5767 struct nlmsghdr *nlh;
5768 struct rtmsg *rtm;
5769 long expires = 0;
5770
5771 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5772 if (!nlh)
5773 return -EMSGSIZE;
5774
5775 if (rt6) {
5776 rt6_dst = &rt6->rt6i_dst;
5777 rt6_src = &rt6->rt6i_src;
5778 rt6_flags = rt6->rt6i_flags;
5779 } else {
5780 rt6_dst = &rt->fib6_dst;
5781 rt6_src = &rt->fib6_src;
5782 rt6_flags = rt->fib6_flags;
5783 }
5784
5785 rtm = nlmsg_data(nlh);
5786 rtm->rtm_family = AF_INET6;
5787 rtm->rtm_dst_len = rt6_dst->plen;
5788 rtm->rtm_src_len = rt6_src->plen;
5789 rtm->rtm_tos = 0;
5790 if (rt->fib6_table)
5791 table = rt->fib6_table->tb6_id;
5792 else
5793 table = RT6_TABLE_UNSPEC;
5794 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5795 if (nla_put_u32(skb, RTA_TABLE, table))
5796 goto nla_put_failure;
5797
5798 rtm->rtm_type = rt->fib6_type;
5799 rtm->rtm_flags = 0;
5800 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5801 rtm->rtm_protocol = rt->fib6_protocol;
5802
5803 if (rt6_flags & RTF_CACHE)
5804 rtm->rtm_flags |= RTM_F_CLONED;
5805
5806 if (dest) {
5807 if (nla_put_in6_addr(skb, RTA_DST, dest))
5808 goto nla_put_failure;
5809 rtm->rtm_dst_len = 128;
5810 } else if (rtm->rtm_dst_len)
5811 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5812 goto nla_put_failure;
5813 #ifdef CONFIG_IPV6_SUBTREES
5814 if (src) {
5815 if (nla_put_in6_addr(skb, RTA_SRC, src))
5816 goto nla_put_failure;
5817 rtm->rtm_src_len = 128;
5818 } else if (rtm->rtm_src_len &&
5819 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5820 goto nla_put_failure;
5821 #endif
5822 if (iif) {
5823 #ifdef CONFIG_IPV6_MROUTE
5824 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5825 int err = ip6mr_get_route(net, skb, rtm, portid);
5826
5827 if (err == 0)
5828 return 0;
5829 if (err < 0)
5830 goto nla_put_failure;
5831 } else
5832 #endif
5833 if (nla_put_u32(skb, RTA_IIF, iif))
5834 goto nla_put_failure;
5835 } else if (dest) {
5836 struct in6_addr saddr_buf;
5837 if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 &&
5838 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5839 goto nla_put_failure;
5840 }
5841
5842 if (rt->fib6_prefsrc.plen) {
5843 struct in6_addr saddr_buf;
5844 saddr_buf = rt->fib6_prefsrc.addr;
5845 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5846 goto nla_put_failure;
5847 }
5848
5849 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5850 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5851 goto nla_put_failure;
5852
5853 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5854 goto nla_put_failure;
5855
5856 /* For multipath routes, walk the siblings list and add
5857 * each as a nexthop within RTA_MULTIPATH.
5858 */
5859 if (rt6) {
5860 struct net_device *dev;
5861
5862 if (rt6_flags & RTF_GATEWAY &&
5863 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5864 goto nla_put_failure;
5865
5866 dev = dst_dev(dst);
5867 if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
5868 goto nla_put_failure;
5869
5870 if (lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
5871 goto nla_put_failure;
5872 } else if (READ_ONCE(rt->fib6_nsiblings)) {
5873 struct fib6_info *sibling;
5874 struct nlattr *mp;
5875
5876 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5877 if (!mp)
5878 goto nla_put_failure;
5879
5880 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5881 rt->fib6_nh->fib_nh_weight, AF_INET6,
5882 0) < 0)
5883 goto nla_put_failure;
5884
5885 rcu_read_lock();
5886
5887 list_for_each_entry_rcu(sibling, &rt->fib6_siblings,
5888 fib6_siblings) {
5889 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5890 sibling->fib6_nh->fib_nh_weight,
5891 AF_INET6, 0) < 0) {
5892 rcu_read_unlock();
5893
5894 goto nla_put_failure;
5895 }
5896 }
5897
5898 rcu_read_unlock();
5899
5900 nla_nest_end(skb, mp);
5901 } else if (rt->nh) {
5902 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5903 goto nla_put_failure;
5904
5905 if (nexthop_is_blackhole(rt->nh))
5906 rtm->rtm_type = RTN_BLACKHOLE;
5907
5908 if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) &&
5909 rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5910 goto nla_put_failure;
5911
5912 rtm->rtm_flags |= nh_flags;
5913 } else {
5914 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5915 &nh_flags, false) < 0)
5916 goto nla_put_failure;
5917
5918 rtm->rtm_flags |= nh_flags;
5919 }
5920
5921 if (rt6_flags & RTF_EXPIRES) {
5922 expires = dst ? READ_ONCE(dst->expires) : rt->expires;
5923 expires -= jiffies;
5924 }
5925
5926 if (!dst) {
5927 if (READ_ONCE(rt->offload))
5928 rtm->rtm_flags |= RTM_F_OFFLOAD;
5929 if (READ_ONCE(rt->trap))
5930 rtm->rtm_flags |= RTM_F_TRAP;
5931 if (READ_ONCE(rt->offload_failed))
5932 rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
5933 }
5934
5935 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5936 goto nla_put_failure;
5937
5938 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5939 goto nla_put_failure;
5940
5942 nlmsg_end(skb, nlh);
5943 return 0;
5944
5945 nla_put_failure:
5946 nlmsg_cancel(skb, nlh);
5947 return -EMSGSIZE;
5948 }
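/* Editorial note: rt6_fill_node() emits one of three nexthop encodings:
 * for a cloned dst (rt6 != NULL) the flat RTA_GATEWAY/RTA_OIF/RTA_ENCAP
 * attributes; for a legacy multipath route the fib6_nh plus all siblings
 * nested under RTA_MULTIPATH; and for a nexthop object RTA_NH_ID,
 * optionally followed by the legacy encoding when
 * net.ipv4.nexthop_compat_mode is enabled.
 */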
5949
5950 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5951 {
5952 const struct net_device *dev = arg;
5953
5954 if (nh->fib_nh_dev == dev)
5955 return 1;
5956
5957 return 0;
5958 }
5959
5960 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5961 const struct net_device *dev)
5962 {
5963 if (f6i->nh) {
5964 struct net_device *_dev = (struct net_device *)dev;
5965
5966 return !!nexthop_for_each_fib6_nh(f6i->nh,
5967 fib6_info_nh_uses_dev,
5968 _dev);
5969 }
5970
5971 if (f6i->fib6_nh->fib_nh_dev == dev)
5972 return true;
5973
5974 if (READ_ONCE(f6i->fib6_nsiblings)) {
5975 const struct fib6_info *sibling;
5976
5977 rcu_read_lock();
5978 list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
5979 fib6_siblings) {
5980 if (sibling->fib6_nh->fib_nh_dev == dev) {
5981 rcu_read_unlock();
5982 return true;
5983 }
5984 if (!READ_ONCE(f6i->fib6_nsiblings))
5985 break;
5986 }
5987 rcu_read_unlock();
5988 }
5989 return false;
5990 }
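/* Editorial note: the sibling walk above runs under RCU and can race
 * with route deletion; rechecking fib6_nsiblings inside the loop bails
 * out as soon as the route stops being part of a sibling group instead
 * of continuing to walk a list that is being dismantled.
 */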
5991
5992 struct fib6_nh_exception_dump_walker {
5993 struct rt6_rtnl_dump_arg *dump;
5994 struct fib6_info *rt;
5995 unsigned int flags;
5996 unsigned int skip;
5997 unsigned int count;
5998 };
5999
6000 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
6001 {
6002 struct fib6_nh_exception_dump_walker *w = arg;
6003 struct rt6_rtnl_dump_arg *dump = w->dump;
6004 struct rt6_exception_bucket *bucket;
6005 struct rt6_exception *rt6_ex;
6006 int i, err;
6007
6008 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
6009 if (!bucket)
6010 return 0;
6011
6012 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
6013 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
6014 if (w->skip) {
6015 w->skip--;
6016 continue;
6017 }
6018
6019 /* Expiration of entries doesn't bump sernum, insertion
6020 * does. Removal is triggered by insertion, so we can
6021 * rely on the fact that if entries change between two
6022 * partial dumps, this node is scanned again completely,
6023 * see rt6_insert_exception() and fib6_dump_table().
6024 *
6025 * Count expired entries we go through as handled
6026 * entries that we'll skip next time, in case of partial
6027 * node dump. Otherwise, if entries expire meanwhile,
6028 * we'll skip the wrong amount.
6029 */
6030 if (rt6_check_expired(rt6_ex->rt6i)) {
6031 w->count++;
6032 continue;
6033 }
6034
6035 err = rt6_fill_node(dump->net, dump->skb, w->rt,
6036 &rt6_ex->rt6i->dst, NULL, NULL, 0,
6037 RTM_NEWROUTE,
6038 NETLINK_CB(dump->cb->skb).portid,
6039 dump->cb->nlh->nlmsg_seq, w->flags);
6040 if (err)
6041 return err;
6042
6043 w->count++;
6044 }
6045 bucket++;
6046 }
6047
6048 return 0;
6049 }
6050
6051 /* Return -1 if done with node, number of handled routes on partial dump */
6052 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
6053 {
6054 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
6055 struct fib_dump_filter *filter = &arg->filter;
6056 unsigned int flags = NLM_F_MULTI;
6057 struct net *net = arg->net;
6058 int count = 0;
6059
6060 if (rt == net->ipv6.fib6_null_entry)
6061 return -1;
6062
6063 if ((filter->flags & RTM_F_PREFIX) &&
6064 !(rt->fib6_flags & RTF_PREFIX_RT)) {
6065 /* success since this is not a prefix route */
6066 return -1;
6067 }
6068 if (filter->filter_set &&
6069 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
6070 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
6071 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
6072 return -1;
6073 }
6074
6075 if (filter->filter_set ||
6076 !filter->dump_routes || !filter->dump_exceptions) {
6077 flags |= NLM_F_DUMP_FILTERED;
6078 }
6079
6080 if (filter->dump_routes) {
6081 if (skip) {
6082 skip--;
6083 } else {
6084 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
6085 0, RTM_NEWROUTE,
6086 NETLINK_CB(arg->cb->skb).portid,
6087 arg->cb->nlh->nlmsg_seq, flags)) {
6088 return 0;
6089 }
6090 count++;
6091 }
6092 }
6093
6094 if (filter->dump_exceptions) {
6095 struct fib6_nh_exception_dump_walker w = { .dump = arg,
6096 .rt = rt,
6097 .flags = flags,
6098 .skip = skip,
6099 .count = 0 };
6100 int err;
6101
6102 rcu_read_lock();
6103 if (rt->nh) {
6104 err = nexthop_for_each_fib6_nh(rt->nh,
6105 rt6_nh_dump_exceptions,
6106 &w);
6107 } else {
6108 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
6109 }
6110 rcu_read_unlock();
6111
6112 if (err)
6113 return count + w.count;
6114 }
6115
6116 return -1;
6117 }
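/* Editorial note on the contract above: a return of -1 tells
 * fib6_dump_table() this node is finished, while a non-negative return
 * is the number of routes handled so far; the dump infrastructure feeds
 * that value back as 'skip' when the same node is revisited on the next
 * partial dump, which is why expired exceptions are counted as handled.
 */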
6118
6119 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
6120 const struct nlmsghdr *nlh,
6121 struct nlattr **tb,
6122 struct netlink_ext_ack *extack)
6123 {
6124 struct rtmsg *rtm;
6125 int i, err;
6126
6127 rtm = nlmsg_payload(nlh, sizeof(*rtm));
6128 if (!rtm) {
6129 NL_SET_ERR_MSG_MOD(extack,
6130 "Invalid header for get route request");
6131 return -EINVAL;
6132 }
6133
6134 if (!netlink_strict_get_check(skb))
6135 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
6136 rtm_ipv6_policy, extack);
6137
6138 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
6139 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
6140 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
6141 rtm->rtm_type) {
6142 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
6143 return -EINVAL;
6144 }
6145 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
6146 NL_SET_ERR_MSG_MOD(extack,
6147 "Invalid flags for get route request");
6148 return -EINVAL;
6149 }
6150
6151 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
6152 rtm_ipv6_policy, extack);
6153 if (err)
6154 return err;
6155
6156 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
6157 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
6158 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
6159 return -EINVAL;
6160 }
6161
6162 if (tb[RTA_FLOWLABEL] &&
6163 (nla_get_be32(tb[RTA_FLOWLABEL]) & ~IPV6_FLOWLABEL_MASK)) {
6164 NL_SET_ERR_MSG_ATTR(extack, tb[RTA_FLOWLABEL],
6165 "Invalid flow label");
6166 return -EINVAL;
6167 }
6168
6169 for (i = 0; i <= RTA_MAX; i++) {
6170 if (!tb[i])
6171 continue;
6172
6173 switch (i) {
6174 case RTA_SRC:
6175 case RTA_DST:
6176 case RTA_IIF:
6177 case RTA_OIF:
6178 case RTA_MARK:
6179 case RTA_UID:
6180 case RTA_SPORT:
6181 case RTA_DPORT:
6182 case RTA_IP_PROTO:
6183 case RTA_FLOWLABEL:
6184 break;
6185 default:
6186 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
6187 return -EINVAL;
6188 }
6189 }
6190
6191 return 0;
6192 }
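/* Editorial note: strict header validation is only applied when the
 * requesting socket opted in (netlink_strict_get_check()); legacy
 * userspace falls back to the forgiving nlmsg_parse_deprecated() path
 * above, which tolerates stray header fields for compatibility.
 */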
6193
6194 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6195 struct netlink_ext_ack *extack)
6196 {
6197 struct net *net = sock_net(in_skb->sk);
6198 struct nlattr *tb[RTA_MAX+1];
6199 int err, iif = 0, oif = 0;
6200 struct fib6_info *from;
6201 struct dst_entry *dst;
6202 struct rt6_info *rt;
6203 struct sk_buff *skb;
6204 struct rtmsg *rtm;
6205 struct flowi6 fl6 = {};
6206 __be32 flowlabel;
6207 bool fibmatch;
6208
6209 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
6210 if (err < 0)
6211 goto errout;
6212
6213 err = -EINVAL;
6214 rtm = nlmsg_data(nlh);
6215 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
6216
6217 if (tb[RTA_SRC]) {
6218 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
6219 goto errout;
6220
6221 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
6222 }
6223
6224 if (tb[RTA_DST]) {
6225 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
6226 goto errout;
6227
6228 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
6229 }
6230
6231 if (tb[RTA_IIF])
6232 iif = nla_get_u32(tb[RTA_IIF]);
6233
6234 if (tb[RTA_OIF])
6235 oif = nla_get_u32(tb[RTA_OIF]);
6236
6237 if (tb[RTA_MARK])
6238 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
6239
6240 if (tb[RTA_UID])
6241 fl6.flowi6_uid = make_kuid(current_user_ns(),
6242 nla_get_u32(tb[RTA_UID]));
6243 else
6244 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
6245
6246 if (tb[RTA_SPORT])
6247 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
6248
6249 if (tb[RTA_DPORT])
6250 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
6251
6252 if (tb[RTA_IP_PROTO]) {
6253 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
6254 &fl6.flowi6_proto, AF_INET6,
6255 extack);
6256 if (err)
6257 goto errout;
6258 }
6259
6260 flowlabel = nla_get_be32_default(tb[RTA_FLOWLABEL], 0);
6261 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, flowlabel);
6262
6263 if (iif) {
6264 struct net_device *dev;
6265 int flags = 0;
6266
6267 rcu_read_lock();
6268
6269 dev = dev_get_by_index_rcu(net, iif);
6270 if (!dev) {
6271 rcu_read_unlock();
6272 err = -ENODEV;
6273 goto errout;
6274 }
6275
6276 fl6.flowi6_iif = iif;
6277
6278 if (!ipv6_addr_any(&fl6.saddr))
6279 flags |= RT6_LOOKUP_F_HAS_SADDR;
6280
6281 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
6282
6283 rcu_read_unlock();
6284 } else {
6285 fl6.flowi6_oif = oif;
6286
6287 dst = ip6_route_output(net, NULL, &fl6);
6288 }
6289
6291 rt = dst_rt6_info(dst);
6292 if (rt->dst.error) {
6293 err = rt->dst.error;
6294 ip6_rt_put(rt);
6295 goto errout;
6296 }
6297
6298 if (rt == net->ipv6.ip6_null_entry) {
6299 err = rt->dst.error;
6300 ip6_rt_put(rt);
6301 goto errout;
6302 }
6303
6304 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
6305 if (!skb) {
6306 ip6_rt_put(rt);
6307 err = -ENOBUFS;
6308 goto errout;
6309 }
6310
6311 skb_dst_set(skb, &rt->dst);
6312
6313 rcu_read_lock();
6314 from = rcu_dereference(rt->from);
6315 if (from) {
6316 if (fibmatch)
6317 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
6318 iif, RTM_NEWROUTE,
6319 NETLINK_CB(in_skb).portid,
6320 nlh->nlmsg_seq, 0);
6321 else
6322 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
6323 &fl6.saddr, iif, RTM_NEWROUTE,
6324 NETLINK_CB(in_skb).portid,
6325 nlh->nlmsg_seq, 0);
6326 } else {
6327 err = -ENETUNREACH;
6328 }
6329 rcu_read_unlock();
6330
6331 if (err < 0) {
6332 kfree_skb(skb);
6333 goto errout;
6334 }
6335
6336 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
6337 errout:
6338 return err;
6339 }
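/* Illustrative example (editorial): RTM_F_FIB_MATCH makes the handler
 * report the matched FIB entry instead of the fully resolved dst, which
 * is what e.g. "ip -6 route get fibmatch 2001:db8::1" (hypothetical
 * address) relies on; note the fibmatch branch above passing a NULL dst
 * and addresses to rt6_fill_node().
 */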
6340
6341 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
6342 unsigned int nlm_flags)
6343 {
6344 struct net *net = info->nl_net;
6345 struct sk_buff *skb;
6346 size_t sz;
6347 u32 seq;
6348 int err;
6349
6350 err = -ENOBUFS;
6351 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6352
6353 rcu_read_lock();
6354 sz = rt6_nlmsg_size(rt);
6355 retry:
6356 skb = nlmsg_new(sz, GFP_ATOMIC);
6357 if (!skb)
6358 goto errout;
6359
6360 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6361 event, info->portid, seq, nlm_flags);
6362 if (err < 0) {
6363 kfree_skb(skb);
6364 /* -EMSGSIZE implies needed space grew under us. */
6365 if (err == -EMSGSIZE) {
6366 sz = max(rt6_nlmsg_size(rt), sz << 1);
6367 goto retry;
6368 }
6369 goto errout;
6370 }
6371
6372 rcu_read_unlock();
6373
6374 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6375 info->nlh, GFP_ATOMIC);
6376 return;
6377 errout:
6378 rcu_read_unlock();
6379 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6380 }
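/* Editorial note: the retry loop above covers the window between sizing
 * and filling the message; on final failure the error is parked on the
 * RTNLGRP_IPV6_ROUTE group via rtnl_set_sk_err() so that listeners
 * observe an error (typically ENOBUFS) instead of silently missing the
 * route change.
 */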
6381
6382 void fib6_rt_update(struct net *net, struct fib6_info *rt,
6383 struct nl_info *info)
6384 {
6385 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6386 struct sk_buff *skb;
6387 int err = -ENOBUFS;
6388
6389 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6390 if (!skb)
6391 goto errout;
6392
6393 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6394 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6395 if (err < 0) {
6396 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6397 WARN_ON(err == -EMSGSIZE);
6398 kfree_skb(skb);
6399 goto errout;
6400 }
6401 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6402 info->nlh, gfp_any());
6403 return;
6404 errout:
6405 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6406 }
6407
6408 void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
6409 bool offload, bool trap, bool offload_failed)
6410 {
6411 struct sk_buff *skb;
6412 int err;
6413
6414 if (READ_ONCE(f6i->offload) == offload &&
6415 READ_ONCE(f6i->trap) == trap &&
6416 READ_ONCE(f6i->offload_failed) == offload_failed)
6417 return;
6418
6419 WRITE_ONCE(f6i->offload, offload);
6420 WRITE_ONCE(f6i->trap, trap);
6421
6422 /* 2 means send notifications only if offload_failed was changed. */
6423 if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
6424 READ_ONCE(f6i->offload_failed) == offload_failed)
6425 return;
6426
6427 WRITE_ONCE(f6i->offload_failed, offload_failed);
6428
6429 if (!rcu_access_pointer(f6i->fib6_node))
6430 /* The route was removed from the tree, do not send
6431 * notification.
6432 */
6433 return;
6434
6435 if (!net->ipv6.sysctl.fib_notify_on_flag_change)
6436 return;
6437
6438 skb = nlmsg_new(rt6_nlmsg_size(f6i), GFP_KERNEL);
6439 if (!skb) {
6440 err = -ENOBUFS;
6441 goto errout;
6442 }
6443
6444 err = rt6_fill_node(net, skb, f6i, NULL, NULL, NULL, 0, RTM_NEWROUTE, 0,
6445 0, 0);
6446 if (err < 0) {
6447 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6448 WARN_ON(err == -EMSGSIZE);
6449 kfree_skb(skb);
6450 goto errout;
6451 }
6452
6453 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, NULL, GFP_KERNEL);
6454 return;
6455
6456 errout:
6457 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6458 }
6459 EXPORT_SYMBOL(fib6_info_hw_flags_set);
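/* Editorial note: the net.ipv6 fib_notify_on_flag_change sysctl selects
 * the policy used above: 0 suppresses these notifications, 1 notifies on
 * any offload/trap flag change, and 2 restricts notifications to changes
 * of offload_failed, as the inline comment in the function states.
 */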
6460
6461 static int ip6_route_dev_notify(struct notifier_block *this,
6462 unsigned long event, void *ptr)
6463 {
6464 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6465 struct net *net = dev_net(dev);
6466
6467 if (!(dev->flags & IFF_LOOPBACK))
6468 return NOTIFY_OK;
6469
6470 if (event == NETDEV_REGISTER) {
6471 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
6472 net->ipv6.ip6_null_entry->dst.dev = dev;
6473 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6474 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6475 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
6476 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
6477 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
6478 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6479 #endif
6480 } else if (event == NETDEV_UNREGISTER &&
6481 dev->reg_state != NETREG_UNREGISTERED) {
6482 /* NETDEV_UNREGISTER can be fired multiple times by
6483 * netdev_wait_allrefs(). Make sure we only call this once.
6484 */
6485 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
6486 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6487 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6488 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
6489 #endif
6490 }
6491
6492 return NOTIFY_OK;
6493 }
6494
6495 /*
6496 * /proc
6497 */
6498
6499 #ifdef CONFIG_PROC_FS
6500 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6501 {
6502 struct net *net = (struct net *)seq->private;
6503 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6504 net->ipv6.rt6_stats->fib_nodes,
6505 net->ipv6.rt6_stats->fib_route_nodes,
6506 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6507 net->ipv6.rt6_stats->fib_rt_entries,
6508 net->ipv6.rt6_stats->fib_rt_cache,
6509 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6510 net->ipv6.rt6_stats->fib_discarded_routes);
6511
6512 return 0;
6513 }
6514 #endif /* CONFIG_PROC_FS */
6515
6516 #ifdef CONFIG_SYSCTL
6517
6518 static int ipv6_sysctl_rtcache_flush(const struct ctl_table *ctl, int write,
6519 void *buffer, size_t *lenp, loff_t *ppos)
6520 {
6521 struct net *net;
6522 int delay;
6523 int ret;
6524 if (!write)
6525 return -EINVAL;
6526
6527 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6528 if (ret)
6529 return ret;
6530
6531 net = (struct net *)ctl->extra1;
6532 delay = net->ipv6.sysctl.flush_delay;
6533 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6534 return 0;
6535 }
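/* Illustrative example (editorial): writing to the "flush" sysctl, e.g.
 *
 *   echo 1 > /proc/sys/net/ipv6/route/flush
 *
 * runs fib6_run_gc() with the written value as the delay and force=true,
 * while a zero or negative value requests a non-forced immediate pass,
 * per the expression above. Reads are rejected with -EINVAL (the entry
 * is write-only, mode 0200).
 */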
6536
6537 static struct ctl_table ipv6_route_table_template[] = {
6538 {
6539 .procname = "max_size",
6540 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
6541 .maxlen = sizeof(int),
6542 .mode = 0644,
6543 .proc_handler = proc_dointvec,
6544 },
6545 {
6546 .procname = "gc_thresh",
6547 .data = &ip6_dst_ops_template.gc_thresh,
6548 .maxlen = sizeof(int),
6549 .mode = 0644,
6550 .proc_handler = proc_dointvec,
6551 },
6552 {
6553 .procname = "flush",
6554 .data = &init_net.ipv6.sysctl.flush_delay,
6555 .maxlen = sizeof(int),
6556 .mode = 0200,
6557 .proc_handler = ipv6_sysctl_rtcache_flush
6558 },
6559 {
6560 .procname = "gc_min_interval",
6561 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6562 .maxlen = sizeof(int),
6563 .mode = 0644,
6564 .proc_handler = proc_dointvec_jiffies,
6565 },
6566 {
6567 .procname = "gc_timeout",
6568 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6569 .maxlen = sizeof(int),
6570 .mode = 0644,
6571 .proc_handler = proc_dointvec_jiffies,
6572 },
6573 {
6574 .procname = "gc_interval",
6575 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
6576 .maxlen = sizeof(int),
6577 .mode = 0644,
6578 .proc_handler = proc_dointvec_jiffies,
6579 },
6580 {
6581 .procname = "gc_elasticity",
6582 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6583 .maxlen = sizeof(int),
6584 .mode = 0644,
6585 .proc_handler = proc_dointvec,
6586 },
6587 {
6588 .procname = "mtu_expires",
6589 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6590 .maxlen = sizeof(int),
6591 .mode = 0644,
6592 .proc_handler = proc_dointvec_jiffies,
6593 },
6594 {
6595 .procname = "min_adv_mss",
6596 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
6597 .maxlen = sizeof(int),
6598 .mode = 0644,
6599 .proc_handler = proc_dointvec,
6600 },
6601 {
6602 .procname = "gc_min_interval_ms",
6603 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6604 .maxlen = sizeof(int),
6605 .mode = 0644,
6606 .proc_handler = proc_dointvec_ms_jiffies,
6607 },
6608 {
6609 .procname = "skip_notify_on_dev_down",
6610 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
6611 .maxlen = sizeof(u8),
6612 .mode = 0644,
6613 .proc_handler = proc_dou8vec_minmax,
6614 .extra1 = SYSCTL_ZERO,
6615 .extra2 = SYSCTL_ONE,
6616 },
6617 };
6618
6619 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6620 {
6621 struct ctl_table *table;
6622
6623 table = kmemdup(ipv6_route_table_template,
6624 sizeof(ipv6_route_table_template),
6625 GFP_KERNEL);
6626
6627 if (table) {
6628 table[0].data = &net->ipv6.sysctl.ip6_rt_max_size;
6629 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6630 table[2].data = &net->ipv6.sysctl.flush_delay;
6631 table[2].extra1 = net;
6632 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6633 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6634 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6635 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6636 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6637 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6638 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6639 table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6640 }
6641
6642 return table;
6643 }
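/* Editorial note: the per-netns table is a kmemdup of the template with
 * each .data pointer rebound to the namespace's own fields; the numeric
 * indices used above must therefore stay in lockstep with the ordering
 * of ipv6_route_table_template[].
 */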
6644
6645 size_t ipv6_route_sysctl_table_size(struct net *net)
6646 {
6647 /* Don't export sysctls to unprivileged users */
6648 if (net->user_ns != &init_user_ns)
6649 return 1;
6650
6651 return ARRAY_SIZE(ipv6_route_table_template);
6652 }
6653 #endif
6654
6655 static int __net_init ip6_route_net_init(struct net *net)
6656 {
6657 int ret = -ENOMEM;
6658
6659 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6660 sizeof(net->ipv6.ip6_dst_ops));
6661
6662 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6663 goto out_ip6_dst_ops;
6664
6665 net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6666 if (!net->ipv6.fib6_null_entry)
6667 goto out_ip6_dst_entries;
6668 memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6669 sizeof(*net->ipv6.fib6_null_entry));
6670
6671 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6672 sizeof(*net->ipv6.ip6_null_entry),
6673 GFP_KERNEL);
6674 if (!net->ipv6.ip6_null_entry)
6675 goto out_fib6_null_entry;
6676 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6677 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6678 ip6_template_metrics, true);
6679 INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->dst.rt_uncached);
6680
6681 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6682 net->ipv6.fib6_has_custom_rules = false;
6683 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6684 sizeof(*net->ipv6.ip6_prohibit_entry),
6685 GFP_KERNEL);
6686 if (!net->ipv6.ip6_prohibit_entry)
6687 goto out_ip6_null_entry;
6688 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6689 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6690 ip6_template_metrics, true);
6691 INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->dst.rt_uncached);
6692
6693 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6694 sizeof(*net->ipv6.ip6_blk_hole_entry),
6695 GFP_KERNEL);
6696 if (!net->ipv6.ip6_blk_hole_entry)
6697 goto out_ip6_prohibit_entry;
6698 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6699 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6700 ip6_template_metrics, true);
6701 INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->dst.rt_uncached);
6702 #ifdef CONFIG_IPV6_SUBTREES
6703 net->ipv6.fib6_routes_require_src = 0;
6704 #endif
6705 #endif
6706
6707 net->ipv6.sysctl.flush_delay = 0;
6708 net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
6709 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6710 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6711 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6712 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6713 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6714 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6715 net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6716
6717 atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
6718
6719 ret = 0;
6720 out:
6721 return ret;
6722
6723 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6724 out_ip6_prohibit_entry:
6725 kfree(net->ipv6.ip6_prohibit_entry);
6726 out_ip6_null_entry:
6727 kfree(net->ipv6.ip6_null_entry);
6728 #endif
6729 out_fib6_null_entry:
6730 kfree(net->ipv6.fib6_null_entry);
6731 out_ip6_dst_entries:
6732 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6733 out_ip6_dst_ops:
6734 goto out;
6735 }
6736
6737 static void __net_exit ip6_route_net_exit(struct net *net)
6738 {
6739 kfree(net->ipv6.fib6_null_entry);
6740 kfree(net->ipv6.ip6_null_entry);
6741 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6742 kfree(net->ipv6.ip6_prohibit_entry);
6743 kfree(net->ipv6.ip6_blk_hole_entry);
6744 #endif
6745 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6746 }
6747
6748 static int __net_init ip6_route_net_init_late(struct net *net)
6749 {
6750 #ifdef CONFIG_PROC_FS
6751 if (!proc_create_net("ipv6_route", 0, net->proc_net,
6752 &ipv6_route_seq_ops,
6753 sizeof(struct ipv6_route_iter)))
6754 return -ENOMEM;
6755
6756 if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
6757 rt6_stats_seq_show, NULL)) {
6758 remove_proc_entry("ipv6_route", net->proc_net);
6759 return -ENOMEM;
6760 }
6761 #endif
6762 return 0;
6763 }
6764
6765 static void __net_exit ip6_route_net_exit_late(struct net *net)
6766 {
6767 #ifdef CONFIG_PROC_FS
6768 remove_proc_entry("ipv6_route", net->proc_net);
6769 remove_proc_entry("rt6_stats", net->proc_net);
6770 #endif
6771 }
6772
6773 static struct pernet_operations ip6_route_net_ops = {
6774 .init = ip6_route_net_init,
6775 .exit = ip6_route_net_exit,
6776 };
6777
6778 static int __net_init ipv6_inetpeer_init(struct net *net)
6779 {
6780 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6781
6782 if (!bp)
6783 return -ENOMEM;
6784 inet_peer_base_init(bp);
6785 net->ipv6.peers = bp;
6786 return 0;
6787 }
6788
6789 static void __net_exit ipv6_inetpeer_exit(struct net *net)
6790 {
6791 struct inet_peer_base *bp = net->ipv6.peers;
6792
6793 net->ipv6.peers = NULL;
6794 inetpeer_invalidate_tree(bp);
6795 kfree(bp);
6796 }
6797
6798 static struct pernet_operations ipv6_inetpeer_ops = {
6799 .init = ipv6_inetpeer_init,
6800 .exit = ipv6_inetpeer_exit,
6801 };
6802
6803 static struct pernet_operations ip6_route_net_late_ops = {
6804 .init = ip6_route_net_init_late,
6805 .exit = ip6_route_net_exit_late,
6806 };
6807
6808 static struct notifier_block ip6_route_dev_notifier = {
6809 .notifier_call = ip6_route_dev_notify,
6810 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6811 };
6812
6813 void __init ip6_route_init_special_entries(void)
6814 {
6815 /* Registration of the loopback device is done before this portion
6816 * of code, so the loopback reference in rt6_info will not be taken;
6817 * do it manually for init_net. */
6818 init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6819 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6820 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6821 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6822 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6823 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6824 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6825 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6826 #endif
6827 }
6828
6829 #if IS_BUILTIN(CONFIG_IPV6)
6830 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6831 DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
6832
6833 BTF_ID_LIST_SINGLE(btf_fib6_info_id, struct, fib6_info)
6834
6835 static const struct bpf_iter_seq_info ipv6_route_seq_info = {
6836 .seq_ops = &ipv6_route_seq_ops,
6837 .init_seq_private = bpf_iter_init_seq_net,
6838 .fini_seq_private = bpf_iter_fini_seq_net,
6839 .seq_priv_size = sizeof(struct ipv6_route_iter),
6840 };
6841
6842 static struct bpf_iter_reg ipv6_route_reg_info = {
6843 .target = "ipv6_route",
6844 .ctx_arg_info_size = 1,
6845 .ctx_arg_info = {
6846 { offsetof(struct bpf_iter__ipv6_route, rt),
6847 PTR_TO_BTF_ID_OR_NULL },
6848 },
6849 .seq_info = &ipv6_route_seq_info,
6850 };
6851
6852 static int __init bpf_iter_register(void)
6853 {
6854 ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
6855 return bpf_iter_reg_target(&ipv6_route_reg_info);
6856 }
6857
6858 static void bpf_iter_unregister(void)
6859 {
6860 bpf_iter_unreg_target(&ipv6_route_reg_info);
6861 }
6862 #endif
6863 #endif
6864
6865 static const struct rtnl_msg_handler ip6_route_rtnl_msg_handlers[] __initconst_or_module = {
6866 {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWROUTE,
6867 .doit = inet6_rtm_newroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6868 {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELROUTE,
6869 .doit = inet6_rtm_delroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6870 {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETROUTE,
6871 .doit = inet6_rtm_getroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6872 };
6873
6874 int __init ip6_route_init(void)
6875 {
6876 int ret;
6877 int cpu;
6878
6879 ret = -ENOMEM;
6880 ip6_dst_ops_template.kmem_cachep =
6881 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6882 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
6883 if (!ip6_dst_ops_template.kmem_cachep)
6884 goto out;
6885
6886 ret = dst_entries_init(&ip6_dst_blackhole_ops);
6887 if (ret)
6888 goto out_kmem_cache;
6889
6890 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6891 if (ret)
6892 goto out_dst_entries;
6893
6894 ret = register_pernet_subsys(&ip6_route_net_ops);
6895 if (ret)
6896 goto out_register_inetpeer;
6897
6898 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6899
6900 ret = fib6_init();
6901 if (ret)
6902 goto out_register_subsys;
6903
6904 ret = xfrm6_init();
6905 if (ret)
6906 goto out_fib6_init;
6907
6908 ret = fib6_rules_init();
6909 if (ret)
6910 goto xfrm6_init;
6911
6912 ret = register_pernet_subsys(&ip6_route_net_late_ops);
6913 if (ret)
6914 goto fib6_rules_init;
6915
6916 ret = rtnl_register_many(ip6_route_rtnl_msg_handlers);
6917 if (ret < 0)
6918 goto out_register_late_subsys;
6919
6920 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6921 if (ret)
6922 goto out_register_late_subsys;
6923
6924 #if IS_BUILTIN(CONFIG_IPV6)
6925 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6926 ret = bpf_iter_register();
6927 if (ret)
6928 goto out_register_late_subsys;
6929 #endif
6930 #endif
6931
6932 for_each_possible_cpu(cpu) {
6933 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6934
6935 INIT_LIST_HEAD(&ul->head);
6936 spin_lock_init(&ul->lock);
6937 }
6938
6939 out:
6940 return ret;
6941
6942 out_register_late_subsys:
6943 rtnl_unregister_all(PF_INET6);
6944 unregister_pernet_subsys(&ip6_route_net_late_ops);
6945 fib6_rules_init:
6946 fib6_rules_cleanup();
6947 xfrm6_init:
6948 xfrm6_fini();
6949 out_fib6_init:
6950 fib6_gc_cleanup();
6951 out_register_subsys:
6952 unregister_pernet_subsys(&ip6_route_net_ops);
6953 out_register_inetpeer:
6954 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6955 out_dst_entries:
6956 dst_entries_destroy(&ip6_dst_blackhole_ops);
6957 out_kmem_cache:
6958 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6959 goto out;
6960 }
6961
6962 void ip6_route_cleanup(void)
6963 {
6964 #if IS_BUILTIN(CONFIG_IPV6)
6965 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6966 bpf_iter_unregister();
6967 #endif
6968 #endif
6969 unregister_netdevice_notifier(&ip6_route_dev_notifier);
6970 unregister_pernet_subsys(&ip6_route_net_late_ops);
6971 fib6_rules_cleanup();
6972 xfrm6_fini();
6973 fib6_gc_cleanup();
6974 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6975 unregister_pernet_subsys(&ip6_route_net_ops);
6976 dst_entries_destroy(&ip6_dst_blackhole_ops);
6977 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6978 }
6979