Lines matching the full identifier 'rt' (net/ipv4/route.c)
415 const struct rtable *rt = container_of(dst, struct rtable, dst);
421 if (likely(rt->rt_gw_family == AF_INET)) {
422 n = ip_neigh_gw4(dev, rt->rt_gw4);
423 } else if (rt->rt_gw_family == AF_INET6) {
424 n = ip_neigh_gw6(dev, &rt->rt_gw6);
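The matches at 415 and 442 use container_of() to recover the enclosing struct rtable from a pointer to its embedded dst member (a struct dst_entry). A minimal userspace sketch of that idiom follows; the demo_* structures are stand-ins invented here, and the macro is the simplified offsetof() form rather than the kernel's type-checked version.

#include <stddef.h>
#include <stdio.h>

/* Simplified form of the kernel macro: step back by the member offset
 * to reach the start of the enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_dst { int refcnt; };                /* stand-in for struct dst_entry */
struct demo_rtable {                            /* stand-in for struct rtable */
        int rt_flags;
        struct demo_dst dst;
};

int main(void)
{
        struct demo_rtable rt = { .rt_flags = 42 };
        struct demo_dst *dst = &rt.dst;         /* what callers pass around */
        struct demo_rtable *back = container_of(dst, struct demo_rtable, dst);

        printf("rt_flags recovered via container_of: %d\n", back->rt_flags);
        return 0;
}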
442 const struct rtable *rt = container_of(dst, struct rtable, dst);
446 if (rt->rt_gw_family == AF_INET) {
447 pkey = (const __be32 *)&rt->rt_gw4;
448 } else if (rt->rt_gw_family == AF_INET6) {
449 return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
451 (rt->rt_flags &
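The two clusters above (415-424 and 442-451) branch on rt_gw_family to decide whether the cached gateway is the IPv4 rt_gw4 or the IPv6 rt_gw6. A self-contained sketch of the same dispatch pattern; struct gw_addr and print_gw() are hypothetical stand-ins, and inet_ntop() is used only to make the two branches visible.

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>

/* Hypothetical stand-in for the rt_gw_family/rt_gw4/rt_gw6 trio. */
struct gw_addr {
        unsigned char family;                   /* AF_INET, AF_INET6, or 0 */
        union {
                struct in_addr  gw4;
                struct in6_addr gw6;
        };
};

static void print_gw(const struct gw_addr *gw)
{
        char buf[INET6_ADDRSTRLEN];

        if (gw->family == AF_INET)
                printf("gw4: %s\n", inet_ntop(AF_INET, &gw->gw4, buf, sizeof(buf)));
        else if (gw->family == AF_INET6)
                printf("gw6: %s\n", inet_ntop(AF_INET6, &gw->gw6, buf, sizeof(buf)));
        else
                printf("no gateway (on-link route)\n");
}

int main(void)
{
        struct gw_addr v4 = { .family = AF_INET };
        struct gw_addr v6 = { .family = AF_INET6 };

        inet_pton(AF_INET, "192.0.2.1", &v4.gw4);
        inet_pton(AF_INET6, "2001:db8::1", &v6.gw6);
        print_gw(&v4);
        print_gw(&v6);
        return 0;
}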
576 struct rtable *rt;
578 rt = rcu_dereference(fnhe->fnhe_rth_input);
579 if (rt) {
581 dst_dev_put(&rt->dst);
582 dst_release(&rt->dst);
584 rt = rcu_dereference(fnhe->fnhe_rth_output);
585 if (rt) {
587 dst_dev_put(&rt->dst);
588 dst_release(&rt->dst);
623 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
625 rt->rt_pmtu = fnhe->fnhe_pmtu;
626 rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
627 rt->dst.expires = fnhe->fnhe_expires;
630 rt->rt_flags |= RTCF_REDIRECTED;
631 rt->rt_uses_gateway = 1;
632 rt->rt_gw_family = AF_INET;
633 rt->rt_gw4 = fnhe->fnhe_gw;
643 struct rtable *rt;
682 rt = rcu_dereference(fnhe->fnhe_rth_input);
683 if (rt)
684 fill_route_from_fnhe(rt, fnhe);
685 rt = rcu_dereference(fnhe->fnhe_rth_output);
686 if (rt)
687 fill_route_from_fnhe(rt, fnhe);
717 rt = rcu_dereference(nhc->nhc_rth_input);
718 if (rt)
719 rt->dst.obsolete = DST_OBSOLETE_KILL;
725 rt = rcu_dereference(*prt);
726 if (rt)
727 rt->dst.obsolete = DST_OBSOLETE_KILL;
737 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
759 if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
782 n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
784 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
799 rt->dst.obsolete = DST_OBSOLETE_KILL;
824 struct rtable *rt;
833 rt = dst_rtable(dst);
836 __ip_do_redirect(rt, skb, &fl4, true);
842 struct rtable *rt = dst_rtable(dst);
845 (rt->rt_flags & RTCF_REDIRECTED) ||
846 rt->dst.expires)
868 struct rtable *rt = skb_rtable(skb);
876 in_dev = __in_dev_get_rcu(rt->dst.dev);
882 vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
884 net = dev_net(rt->dst.dev);
889 rt_nexthop(rt, ip_hdr(skb)->daddr));
916 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
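rt_nexthop(), used at 889, 916 and 1302, picks the address that neighbour resolution and ICMP redirects should target: the cached IPv4 gateway if the route has one, otherwise the packet's destination itself. A rough, compilable sketch of that selection; struct rt_like and the constants below are invented for illustration, not kernel types.

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>                 /* AF_INET */

typedef uint32_t be32;                  /* stand-in for the kernel's __be32 */

/* Hypothetical mirror of the two rtable fields the helper consults. */
struct rt_like {
        unsigned char gw_family;        /* rt_gw_family */
        be32          gw4;              /* rt_gw4 */
};

/* Rough sketch of the rt_nexthop() selection: prefer the cached IPv4
 * gateway, otherwise the destination itself is the next hop (on-link). */
static be32 rt_nexthop_sketch(const struct rt_like *rt, be32 daddr)
{
        return rt->gw_family == AF_INET ? rt->gw4 : daddr;
}

int main(void)
{
        struct rt_like via_gw = { .gw_family = AF_INET, .gw4 = 0xabcd0001 };
        struct rt_like onlink = { .gw_family = 0 };
        be32 daddr = 0xabcd0002;        /* arbitrary example values */

        printf("via gateway: 0x%x\n", (unsigned int)rt_nexthop_sketch(&via_gw, daddr));
        printf("on-link:     0x%x\n", (unsigned int)rt_nexthop_sketch(&onlink, daddr));
        return 0;
}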
933 struct rtable *rt = skb_rtable(skb);
955 net = dev_net(rt->dst.dev);
957 switch (rt->dst.error) {
971 switch (rt->dst.error) {
1012 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1014 struct dst_entry *dst = &rt->dst;
1034 if (rt->rt_pmtu == mtu && !lock &&
1066 struct rtable *rt = dst_rtable(dst);
1075 __ip_rt_update_pmtu(rt, &fl4, mtu);
1083 struct rtable *rt;
1088 rt = __ip_route_output_key(net, &fl4);
1089 if (!IS_ERR(rt)) {
1090 __ip_rt_update_pmtu(rt, &fl4, mtu);
1091 ip_rt_put(rt);
1100 struct rtable *rt;
1107 rt = __ip_route_output_key(sock_net(sk), &fl4);
1108 if (!IS_ERR(rt)) {
1109 __ip_rt_update_pmtu(rt, &fl4, mtu);
1110 ip_rt_put(rt);
1118 struct rtable *rt;
1137 rt = dst_rtable(odst);
1139 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1140 if (IS_ERR(rt))
1146 __ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu);
1148 if (!dst_check(&rt->dst, 0)) {
1150 dst_release(&rt->dst);
1152 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1153 if (IS_ERR(rt))
1160 sk_dst_set(sk, &rt->dst);
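The cluster around 1012-1160 is where learned path-MTU values are written to the route (rt_pmtu, rt_mtu_locked, dst.expires). From userspace, the value cached on the route a connected socket is using can be read back with the IP_MTU socket option; a minimal sketch, assuming Linux/glibc headers and using a documentation address (192.0.2.1) purely as the example destination.

#include <arpa/inet.h>
#include <netinet/in.h>                 /* IP_MTU, IP_MTU_DISCOVER (Linux/glibc) */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in dst;
        int fd, mtu = 0, pmtudisc = IP_PMTUDISC_DO;
        socklen_t len = sizeof(mtu);

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
                return 1;

        /* Ask the stack to do path-MTU discovery for this socket. */
        setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtudisc, sizeof(pmtudisc));

        memset(&dst, 0, sizeof(dst));
        dst.sin_family = AF_INET;
        dst.sin_port = htons(9);                        /* connect() on UDP sends nothing */
        inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr); /* example destination */

        /* connect() attaches a route to the socket; IP_MTU then reports the
         * path MTU currently cached on that route. */
        if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) == 0 &&
            getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
                printf("cached path MTU: %d\n", mtu);

        close(fd);
        return 0;
}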
1173 struct rtable *rt;
1176 rt = __ip_route_output_key(net, &fl4);
1177 if (!IS_ERR(rt)) {
1178 __ip_do_redirect(rt, skb, &fl4, false);
1179 ip_rt_put(rt);
1188 struct rtable *rt;
1192 rt = __ip_route_output_key(net, &fl4);
1193 if (!IS_ERR(rt)) {
1194 __ip_do_redirect(rt, skb, &fl4, false);
1195 ip_rt_put(rt);
1203 struct rtable *rt = dst_rtable(dst);
1213 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1251 struct rtable *rt;
1255 rt = skb_rtable(skb);
1256 if (rt)
1257 dst_set_expires(&rt->dst, 0);
1279 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1283 if (rt_is_output_route(rt))
1292 .flowi4_oif = rt->dst.dev->ifindex,
1298 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1299 src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
1301 src = inet_select_addr(rt->dst.dev,
1302 rt_nexthop(rt, iph->daddr),
1310 static void set_class_tag(struct rtable *rt, u32 tag)
1312 if (!(rt->dst.tclassid & 0xFFFF))
1313 rt->dst.tclassid |= tag & 0xFFFF;
1314 if (!(rt->dst.tclassid & 0xFFFF0000))
1315 rt->dst.tclassid |= tag & 0xFFFF0000;
1431 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1441 int genid = fnhe_genid(dev_net(rt->dst.dev));
1443 if (rt_is_input_route(rt))
1458 fill_route_from_fnhe(rt, fnhe);
1459 if (!rt->rt_gw4) {
1460 rt->rt_gw4 = daddr;
1461 rt->rt_gw_family = AF_INET;
1465 dst_hold(&rt->dst);
1466 rcu_assign_pointer(*porig, rt);
1481 static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
1486 if (rt_is_input_route(rt)) {
1496 dst_hold(&rt->dst);
1497 prev = cmpxchg(p, orig, rt);
1504 dst_release(&rt->dst);
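rt_cache_route() (1481-1504) publishes a new cached route with dst_hold() plus cmpxchg(): take a reference first, try to swap the pointer, then either release whatever was displaced or back out the hold if another CPU changed the slot in the meantime. A userspace sketch of that publish pattern with C11 atomics; struct entry and its refcnt are stand-ins, not the kernel types.

#include <stdatomic.h>
#include <stdio.h>

struct entry {
        int refcnt;                             /* stand-in for the dst reference count */
        int value;
};

static _Atomic(struct entry *) cache;           /* stand-in for a cached-route slot */

/* Mirror of the rt_cache_route() shape: hold a reference, cmpxchg against
 * the value we read, then either release what we displaced or back out
 * our own hold if another thread changed the slot underneath us. */
static int cache_install(struct entry *ent)
{
        struct entry *old = atomic_load(&cache);

        ent->refcnt++;                          /* dst_hold() */
        if (atomic_compare_exchange_strong(&cache, &old, ent)) {
                if (old)
                        old->refcnt--;          /* release the entry we displaced */
                return 1;
        }
        ent->refcnt--;                          /* lost the race; undo the hold */
        return 0;
}

int main(void)
{
        struct entry a = { .value = 1 }, b = { .value = 2 };
        int ok;

        ok = cache_install(&a);
        printf("install a: %d  a.refcnt=%d\n", ok, a.refcnt);
        ok = cache_install(&b);
        printf("install b: %d  a.refcnt=%d  b.refcnt=%d\n", ok, a.refcnt, b.refcnt);
        return 0;
}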
1518 void rt_add_uncached_list(struct rtable *rt)
1522 rt->dst.rt_uncached_list = ul;
1525 list_add_tail(&rt->dst.rt_uncached, &ul->head);
1529 void rt_del_uncached_list(struct rtable *rt)
1531 if (!list_empty(&rt->dst.rt_uncached)) {
1532 struct uncached_list *ul = rt->dst.rt_uncached_list;
1535 list_del_init(&rt->dst.rt_uncached);
1548 struct rtable *rt, *safe;
1558 list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
1559 if (rt->dst.dev != dev)
1561 rt->dst.dev = blackhole_netdev;
1563 &rt->dst.dev_tracker, GFP_ATOMIC);
1564 list_del_init(&rt->dst.rt_uncached);
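rt_add_uncached_list(), rt_del_uncached_list() and the device-flush path (1518-1564) rely on the kernel's intrusive list primitives, in particular list_del_init(), which re-initialises the removed node so the !list_empty() check at 1531 stays cheap and safe to repeat. A small userspace copy of just those primitives; names follow the kernel's, bodies are simplified.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

static void list_del_init(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);          /* re-init so list_empty() stays true */
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
        struct list_head head = LIST_HEAD_INIT(head);
        struct list_head node;

        INIT_LIST_HEAD(&node);
        printf("linked? %d\n", !list_empty(&node));     /* 0: not on any list */
        list_add_tail(&node, &head);
        printf("linked? %d\n", !list_empty(&node));     /* 1: on the list */
        list_del_init(&node);
        printf("linked? %d\n", !list_empty(&node));     /* 0: safe to test again */
        return 0;
}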
1570 static bool rt_cache_valid(const struct rtable *rt)
1572 return rt &&
1573 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1574 !rt_is_expired(rt);
1577 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1589 rt->rt_uses_gateway = 1;
1590 rt->rt_gw_family = nhc->nhc_gw_family;
1593 rt->rt_gw4 = nhc->nhc_gw.ipv4;
1595 rt->rt_gw6 = nhc->nhc_gw.ipv6;
1598 ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
1605 rt->dst.tclassid = nh->nh_tclassid;
1608 rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
1610 cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1612 cached = rt_cache_route(nhc, rt);
1619 if (!rt->rt_gw4) {
1620 rt->rt_gw_family = AF_INET;
1621 rt->rt_gw4 = daddr;
1623 rt_add_uncached_list(rt);
1626 rt_add_uncached_list(rt);
1630 set_class_tag(rt, res->tclassid);
1632 set_class_tag(rt, itag);
1640 struct rtable *rt;
1642 rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
1645 if (rt) {
1646 rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1647 rt->rt_flags = flags;
1648 rt->rt_type = type;
1649 rt->rt_is_input = 0;
1650 rt->rt_iif = 0;
1651 rt->rt_pmtu = 0;
1652 rt->rt_mtu_locked = 0;
1653 rt->rt_uses_gateway = 0;
1654 rt->rt_gw_family = 0;
1655 rt->rt_gw4 = 0;
1657 rt->dst.output = ip_output;
1659 rt->dst.input = ip_local_deliver;
1662 return rt;
1666 struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
1671 rt->dst.flags);
1675 new_rt->rt_flags = rt->rt_flags;
1676 new_rt->rt_type = rt->rt_type;
1677 new_rt->rt_is_input = rt->rt_is_input;
1678 new_rt->rt_iif = rt->rt_iif;
1679 new_rt->rt_pmtu = rt->rt_pmtu;
1680 new_rt->rt_mtu_locked = rt->rt_mtu_locked;
1681 new_rt->rt_gw_family = rt->rt_gw_family;
1682 if (rt->rt_gw_family == AF_INET)
1683 new_rt->rt_gw4 = rt->rt_gw4;
1684 else if (rt->rt_gw_family == AF_INET6)
1685 new_rt->rt_gw6 = rt->rt_gw6;
1687 new_rt->dst.input = rt->dst.input;
1688 new_rt->dst.output = rt->dst.output;
1689 new_rt->dst.error = rt->dst.error;
1691 new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
2188 struct rtable *rt = skb_rtable(hint);
2210 if (rt->rt_type != RTN_LOCAL)
2884 struct rtable *rt;
2886 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
2887 if (rt) {
2888 struct dst_entry *new = &rt->dst;
2897 rt->rt_is_input = ort->rt_is_input;
2898 rt->rt_iif = ort->rt_iif;
2899 rt->rt_pmtu = ort->rt_pmtu;
2900 rt->rt_mtu_locked = ort->rt_mtu_locked;
2902 rt->rt_genid = rt_genid_ipv4(net);
2903 rt->rt_flags = ort->rt_flags;
2904 rt->rt_type = ort->rt_type;
2905 rt->rt_uses_gateway = ort->rt_uses_gateway;
2906 rt->rt_gw_family = ort->rt_gw_family;
2907 if (rt->rt_gw_family == AF_INET)
2908 rt->rt_gw4 = ort->rt_gw4;
2909 else if (rt->rt_gw_family == AF_INET6)
2910 rt->rt_gw6 = ort->rt_gw6;
2915 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2921 struct rtable *rt = __ip_route_output_key(net, flp4);
2923 if (IS_ERR(rt))
2924 return rt;
2927 flp4->flowi4_oif = rt->dst.dev->ifindex;
2928 rt = dst_rtable(xfrm_lookup_route(net, &rt->dst,
2933 return rt;
2939 struct rtable *rt, u32 table_id, dscp_t dscp,
2961 r->rtm_type = rt->rt_type;
2964 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2965 if (rt->rt_flags & RTCF_NOTIFY)
2977 if (rt->dst.dev &&
2978 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2980 if (rt->dst.lwtstate &&
2981 lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
2984 if (rt->dst.tclassid &&
2985 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2988 if (fl4 && !rt_is_input_route(rt) &&
2993 if (rt->rt_uses_gateway) {
2994 if (rt->rt_gw_family == AF_INET &&
2995 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
2997 } else if (rt->rt_gw_family == AF_INET6) {
3008 memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
3012 expires = rt->dst.expires;
3022 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3023 if (rt->rt_pmtu && expires)
3024 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
3025 if (rt->rt_mtu_locked && expires)
3041 if (rt_is_input_route(rt)) {
3062 error = rt->dst.error;
3064 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3087 struct rtable *rt;
3100 rt = rcu_dereference(fnhe->fnhe_rth_input);
3101 if (!rt)
3102 rt = rcu_dereference(fnhe->fnhe_rth_output);
3103 if (!rt)
3106 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
3289 struct rtable *rt = NULL;
3362 rt = skb_rtable(skb);
3363 if (err == 0 && rt->dst.error)
3364 err = -rt->dst.error;
3368 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
3370 if (IS_ERR(rt))
3371 err = PTR_ERR(rt);
3373 skb_dst_set(skb, &rt->dst);
3380 rt->rt_flags |= RTCF_NOTIFY;
3405 fri.type = rt->rt_type;
3431 err = rt_fill_info(net, dst, src, rt, table_id, res.dscp, &fl4,
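The final clusters (2939-3064 and 3289-3431) are the RTM_GETROUTE path: the handler resolves the route and rt_fill_info() encodes it as netlink attributes (RTA_OIF, RTA_GATEWAY, metrics, cache info). The sketch below issues that request from userspace and prints two of those attributes; it assumes Linux rtnetlink headers, does minimal error handling, and uses 192.0.2.1 purely as an example destination.

#include <arpa/inet.h>
#include <linux/rtnetlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct {
                struct nlmsghdr nh;
                struct rtmsg    rt;
                char            attrs[64];
        } req;
        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
        char buf[8192];
        struct rtattr *rta;
        struct in_addr dst;
        int fd, len;

        inet_pton(AF_INET, "192.0.2.1", &dst);          /* example destination */

        memset(&req, 0, sizeof(req));
        req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
        req.nh.nlmsg_type = RTM_GETROUTE;
        req.nh.nlmsg_flags = NLM_F_REQUEST;
        req.rt.rtm_family = AF_INET;
        req.rt.rtm_dst_len = 32;

        /* Append an RTA_DST attribute carrying the destination address. */
        rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
        rta->rta_type = RTA_DST;
        rta->rta_len = RTA_LENGTH(sizeof(dst));
        memcpy(RTA_DATA(rta), &dst, sizeof(dst));
        req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
        if (fd < 0 || sendto(fd, &req, req.nh.nlmsg_len, 0,
                             (struct sockaddr *)&sa, sizeof(sa)) < 0)
                return 1;

        len = recv(fd, buf, sizeof(buf), 0);
        if (len > 0) {
                struct nlmsghdr *nh = (struct nlmsghdr *)buf;

                if (nh->nlmsg_type == RTM_NEWROUTE) {
                        struct rtmsg *rtm = NLMSG_DATA(nh);
                        int alen = RTM_PAYLOAD(nh);
                        char addr[INET_ADDRSTRLEN];

                        /* Walk the attributes that rt_fill_info() emitted. */
                        for (rta = RTM_RTA(rtm); RTA_OK(rta, alen);
                             rta = RTA_NEXT(rta, alen)) {
                                if (rta->rta_type == RTA_GATEWAY)
                                        printf("gateway: %s\n",
                                               inet_ntop(AF_INET, RTA_DATA(rta),
                                                         addr, sizeof(addr)));
                                else if (rta->rta_type == RTA_OIF)
                                        printf("oif: %d\n", *(int *)RTA_DATA(rta));
                        }
                }
        }
        close(fd);
        return 0;
}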