Lines Matching +full:array +full:- +full:nest

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2017-19 Cumulus Networks
5 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
93 return !net->nexthop.notifier_chain.head; in nexthop_notifiers_is_empty()
100 nh_info->dev = nhi->fib_nhc.nhc_dev; in __nh_notifier_single_info_init()
101 nh_info->gw_family = nhi->fib_nhc.nhc_gw_family; in __nh_notifier_single_info_init()
102 if (nh_info->gw_family == AF_INET) in __nh_notifier_single_info_init()
103 nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4; in __nh_notifier_single_info_init()
104 else if (nh_info->gw_family == AF_INET6) in __nh_notifier_single_info_init()
105 nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6; in __nh_notifier_single_info_init()
107 nh_info->id = nhi->nh_parent->id; in __nh_notifier_single_info_init()
108 nh_info->is_reject = nhi->reject_nh; in __nh_notifier_single_info_init()
109 nh_info->is_fdb = nhi->fdb_nh; in __nh_notifier_single_info_init()
110 nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate; in __nh_notifier_single_info_init()
116 struct nh_info *nhi = rtnl_dereference(nh->nh_info); in nh_notifier_single_info_init()
118 info->type = NH_NOTIFIER_INFO_TYPE_SINGLE; in nh_notifier_single_info_init()
119 info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL); in nh_notifier_single_info_init()
120 if (!info->nh) in nh_notifier_single_info_init()
121 return -ENOMEM; in nh_notifier_single_info_init()
123 __nh_notifier_single_info_init(info->nh, nhi); in nh_notifier_single_info_init()
130 kfree(info->nh); in nh_notifier_single_info_fini()
136 u16 num_nh = nhg->num_nh; in nh_notifier_mpath_info_init()
139 info->type = NH_NOTIFIER_INFO_TYPE_GRP; in nh_notifier_mpath_info_init()
140 info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh), in nh_notifier_mpath_info_init()
142 if (!info->nh_grp) in nh_notifier_mpath_info_init()
143 return -ENOMEM; in nh_notifier_mpath_info_init()
145 info->nh_grp->num_nh = num_nh; in nh_notifier_mpath_info_init()
146 info->nh_grp->is_fdb = nhg->fdb_nh; in nh_notifier_mpath_info_init()
147 info->nh_grp->hw_stats = nhg->hw_stats; in nh_notifier_mpath_info_init()
150 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in nh_notifier_mpath_info_init()
153 nhi = rtnl_dereference(nhge->nh->nh_info); in nh_notifier_mpath_info_init()
154 info->nh_grp->nh_entries[i].weight = nhge->weight; in nh_notifier_mpath_info_init()
155 __nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh, in nh_notifier_mpath_info_init()
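
nh_notifier_mpath_info_init() above sizes its allocation with struct_size(), the kernel's overflow-checked helper for a struct ending in a flexible array. A minimal userspace sketch of the same pattern; struct grp_info and grp_info_alloc() are illustrative names, and the plain multiplication here lacks the overflow check struct_size() provides:

	#include <stdlib.h>

	struct entry_info { unsigned int id; unsigned int weight; };
	struct grp_info {
		unsigned int num;
		struct entry_info entries[];	/* flexible array member */
	};

	static struct grp_info *grp_info_alloc(unsigned int num)
	{
		/* kernel: kzalloc(struct_size(gi, entries, num), GFP_KERNEL) */
		struct grp_info *gi = calloc(1, sizeof(*gi) +
					     num * sizeof(gi->entries[0]));

		if (gi)
			gi->num = num;
		return gi;
	}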
165 struct nh_res_table *res_table = rtnl_dereference(nhg->res_table); in nh_notifier_res_table_info_init()
166 u16 num_nh_buckets = res_table->num_nh_buckets; in nh_notifier_res_table_info_init()
170 info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE; in nh_notifier_res_table_info_init()
171 size = struct_size(info->nh_res_table, nhs, num_nh_buckets); in nh_notifier_res_table_info_init()
172 info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | in nh_notifier_res_table_info_init()
174 if (!info->nh_res_table) in nh_notifier_res_table_info_init()
175 return -ENOMEM; in nh_notifier_res_table_info_init()
177 info->nh_res_table->num_nh_buckets = num_nh_buckets; in nh_notifier_res_table_info_init()
178 info->nh_res_table->hw_stats = nhg->hw_stats; in nh_notifier_res_table_info_init()
181 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; in nh_notifier_res_table_info_init()
185 nhge = rtnl_dereference(bucket->nh_entry); in nh_notifier_res_table_info_init()
186 nhi = rtnl_dereference(nhge->nh->nh_info); in nh_notifier_res_table_info_init()
187 __nh_notifier_single_info_init(&info->nh_res_table->nhs[i], in nh_notifier_res_table_info_init()
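
The resilient-table variant above switches from kzalloc() to __vmalloc(... | __GFP_ZERO | __GFP_NOWARN) because the bucket array can be large, and frees it with vfree(). A closely related kernel idiom is kvzalloc()/kvfree(), which picks kmalloc or vmalloc automatically; a kernel-style sketch with hypothetical struct my_table:

	/* Sketch only: the file itself uses __vmalloc()/vfree() directly. */
	static struct my_table *my_table_alloc(u16 nbuckets)
	{
		struct my_table *t = kvzalloc(struct_size(t, buckets, nbuckets),
					      GFP_KERNEL);

		if (t)
			t->num = nbuckets;
		return t;
	}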
197 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); in nh_notifier_grp_info_init()
199 if (nhg->hash_threshold) in nh_notifier_grp_info_init()
201 else if (nhg->resilient) in nh_notifier_grp_info_init()
203 return -EINVAL; in nh_notifier_grp_info_init()
209 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); in nh_notifier_grp_info_fini()
211 if (nhg->hash_threshold) in nh_notifier_grp_info_fini()
212 kfree(info->nh_grp); in nh_notifier_grp_info_fini()
213 else if (nhg->resilient) in nh_notifier_grp_info_fini()
214 vfree(info->nh_res_table); in nh_notifier_grp_info_fini()
220 info->id = nh->id; in nh_notifier_info_init()
222 if (nh->is_group) in nh_notifier_info_init()
231 if (nh->is_group) in nh_notifier_info_fini()
259 err = blocking_notifier_call_chain(&net->nexthop.notifier_chain, in call_nexthop_notifiers()
291 nh = nexthop_find_by_id(info->net, info->id); in nh_notifier_res_bucket_idle_timer_get()
293 err = -EINVAL; in nh_notifier_res_bucket_idle_timer_get()
297 nhg = rcu_dereference(nh->nh_grp); in nh_notifier_res_bucket_idle_timer_get()
298 res_table = rcu_dereference(nhg->res_table); in nh_notifier_res_bucket_idle_timer_get()
299 *p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer); in nh_notifier_res_bucket_idle_timer_get()
320 info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET; in nh_notifier_res_bucket_info_init()
321 info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket), in nh_notifier_res_bucket_info_init()
323 if (!info->nh_res_bucket) in nh_notifier_res_bucket_info_init()
324 return -ENOMEM; in nh_notifier_res_bucket_info_init()
326 info->nh_res_bucket->bucket_index = bucket_index; in nh_notifier_res_bucket_info_init()
327 info->nh_res_bucket->idle_timer_ms = idle_timer_ms; in nh_notifier_res_bucket_info_init()
328 info->nh_res_bucket->force = force; in nh_notifier_res_bucket_info_init()
329 __nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi); in nh_notifier_res_bucket_info_init()
330 __nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi); in nh_notifier_res_bucket_info_init()
336 kfree(info->nh_res_bucket); in nh_notifier_res_bucket_info_fini()
360 err = blocking_notifier_call_chain(&net->nexthop.notifier_chain, in __call_nexthop_res_bucket_notifiers()
375 * maintain mutual exclusion. Since there are only two and well-known
379 * - Have the DW operate without locking;
380 * - synchronously cancel the DW;
381 * - do the writing;
382 * - if the write was not actually a delete, call upkeep, which schedules
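
The comment fragment above spells out how writers exclude the upkeep delayed work (DW) without a lock: synchronously cancel it, mutate the table, then let upkeep reschedule it. A kernel-style sketch of that ordering; struct my_table, my_table_write() and my_table_upkeep() are hypothetical stand-ins:

	static void my_table_update(struct my_table *t, bool deleting)
	{
		/* After this the DW is not running and will not start: */
		cancel_delayed_work_sync(&t->upkeep_dw);
		my_table_write(t);		/* safe: DW excluded */
		if (!deleting)
			my_table_upkeep(t);	/* upkeep reschedules the DW */
	}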
397 struct nh_info *oldi = nh_res_dereference(old_nh->nh_info); in call_nexthop_res_bucket_notifiers()
398 struct nh_info *newi = nh_res_dereference(new_nh->nh_info); in call_nexthop_res_bucket_notifiers()
410 .id = nh->id, in call_nexthop_res_table_notifiers()
424 nhg = rtnl_dereference(nh->nh_grp); in call_nexthop_res_table_notifiers()
431 err = blocking_notifier_call_chain(&net->nexthop.notifier_chain, in call_nexthop_res_table_notifiers()
454 err = nb->notifier_call(nb, event_type, &info); in call_nexthop_notifier()
462 unsigned int mask = NH_DEV_HASHSIZE - 1; in nh_dev_hashfn()
471 struct net_device *dev = nhi->fib_nhc.nhc_dev; in nexthop_devhash_add()
477 hash = nh_dev_hashfn(dev->ifindex); in nexthop_devhash_add()
478 head = &net->nexthop.devhash[hash]; in nexthop_devhash_add()
479 hlist_add_head(&nhi->dev_hash, head); in nexthop_devhash_add()
487 nhg = rcu_dereference_raw(nh->nh_grp); in nexthop_free_group()
488 for (i = 0; i < nhg->num_nh; ++i) { in nexthop_free_group()
489 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in nexthop_free_group()
491 WARN_ON(!list_empty(&nhge->nh_list)); in nexthop_free_group()
492 free_percpu(nhge->stats); in nexthop_free_group()
493 nexthop_put(nhge->nh); in nexthop_free_group()
496 WARN_ON(nhg->spare == nhg); in nexthop_free_group()
498 if (nhg->resilient) in nexthop_free_group()
499 vfree(rcu_dereference_raw(nhg->res_table)); in nexthop_free_group()
501 kfree(nhg->spare); in nexthop_free_group()
509 nhi = rcu_dereference_raw(nh->nh_info); in nexthop_free_single()
510 switch (nhi->family) { in nexthop_free_single()
512 fib_nh_release(nh->net, &nhi->fib_nh); in nexthop_free_single()
515 ipv6_stub->fib6_nh_release(&nhi->fib6_nh); in nexthop_free_single()
525 if (nh->is_group) in nexthop_free_rcu()
540 INIT_LIST_HEAD(&nh->fi_list); in nexthop_alloc()
541 INIT_LIST_HEAD(&nh->f6i_list); in nexthop_alloc()
542 INIT_LIST_HEAD(&nh->grp_list); in nexthop_alloc()
543 INIT_LIST_HEAD(&nh->fdb_list); in nexthop_alloc()
554 nhg->num_nh = num_nh; in nexthop_grp_alloc()
564 const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets; in nexthop_res_table_alloc()
573 res_table->net = net; in nexthop_res_table_alloc()
574 res_table->nhg_id = nhg_id; in nexthop_res_table_alloc()
575 INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw); in nexthop_res_table_alloc()
576 INIT_LIST_HEAD(&res_table->uw_nh_entries); in nexthop_res_table_alloc()
577 res_table->idle_timer = cfg->nh_grp_res_idle_timer; in nexthop_res_table_alloc()
578 res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer; in nexthop_res_table_alloc()
579 res_table->num_nh_buckets = num_nh_buckets; in nexthop_res_table_alloc()
585 while (++net->nexthop.seq == 0) in nh_base_seq_inc()
594 pp = &net->nexthop.rb_root.rb_node; in nexthop_find_by_id()
604 if (id < nh->id) in nexthop_find_by_id()
605 pp = &next->rb_left; in nexthop_find_by_id()
606 else if (id > nh->id) in nexthop_find_by_id()
607 pp = &next->rb_right; in nexthop_find_by_id()
618 u32 id_start = net->nexthop.last_id_allocated; in nh_find_unused_id()
621 net->nexthop.last_id_allocated++; in nh_find_unused_id()
622 if (net->nexthop.last_id_allocated == id_start) in nh_find_unused_id()
625 if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated)) in nh_find_unused_id()
626 return net->nexthop.last_id_allocated; in nh_find_unused_id()
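
nh_find_unused_id() scans upward from the last allocated ID, wrapping past the top, and fails only after a full cycle. The same search in plain C, with id_in_use() as a hypothetical stand-in for nexthop_find_by_id() and an explicit skip of 0 (which on the netlink side means "auto-assign") added for clarity:

	#include <stdbool.h>

	bool id_in_use(unsigned int id);	/* hypothetical lookup */

	static unsigned int find_unused_id(unsigned int *last_allocated)
	{
		unsigned int start = *last_allocated;

		while (1) {
			(*last_allocated)++;	/* unsigned: wraps to 0 */
			if (*last_allocated == start)
				return 0;	/* full cycle, no free ID */
			if (*last_allocated && !id_in_use(*last_allocated))
				return *last_allocated;
		}
	}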
640 if (list_empty(&res_table->uw_nh_entries)) in nh_res_table_unbalanced_time()
642 return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since); in nh_res_table_unbalanced_time()
647 struct nh_res_table *res_table = rtnl_dereference(nhg->res_table); in nla_put_nh_group_res()
648 struct nlattr *nest; in nla_put_nh_group_res() local
650 nest = nla_nest_start(skb, NHA_RES_GROUP); in nla_put_nh_group_res()
651 if (!nest) in nla_put_nh_group_res()
652 return -EMSGSIZE; in nla_put_nh_group_res()
655 res_table->num_nh_buckets) || in nla_put_nh_group_res()
657 jiffies_to_clock_t(res_table->idle_timer)) || in nla_put_nh_group_res()
659 jiffies_to_clock_t(res_table->unbalanced_timer)) || in nla_put_nh_group_res()
665 nla_nest_end(skb, nest); in nla_put_nh_group_res()
669 nla_nest_cancel(skb, nest); in nla_put_nh_group_res()
670 return -EMSGSIZE; in nla_put_nh_group_res()
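
nla_put_nh_group_res() above follows the standard netlink nesting discipline: nla_nest_start(), a run of nla_put_*() calls, nla_nest_end() on success, and nla_nest_cancel() on any failure so the half-written nest is trimmed from the skb. A condensed kernel-style template; MY_ATTR_NEST and MY_ATTR_VAL are placeholders, not kernel ABI:

	static int fill_my_nest(struct sk_buff *skb, u32 val)
	{
		struct nlattr *nest = nla_nest_start(skb, MY_ATTR_NEST);

		if (!nest)
			return -EMSGSIZE;
		if (nla_put_u32(skb, MY_ATTR_VAL, val))
			goto nla_put_failure;
		nla_nest_end(skb, nest);	/* patches the nest length */
		return 0;

	nla_put_failure:
		nla_nest_cancel(skb, nest);	/* rolls back the partial nest */
		return -EMSGSIZE;
	}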
677 cpu_stats = get_cpu_ptr(nhge->stats); in nh_grp_entry_stats_inc()
678 u64_stats_update_begin(&cpu_stats->syncp); in nh_grp_entry_stats_inc()
679 u64_stats_inc(&cpu_stats->packets); in nh_grp_entry_stats_inc()
680 u64_stats_update_end(&cpu_stats->syncp); in nh_grp_entry_stats_inc()
696 cpu_stats = per_cpu_ptr(nhge->stats, i); in nh_grp_entry_stats_read()
698 start = u64_stats_fetch_begin(&cpu_stats->syncp); in nh_grp_entry_stats_read()
699 packets = u64_stats_read(&cpu_stats->packets); in nh_grp_entry_stats_read()
700 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); in nh_grp_entry_stats_read()
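
The read side above sums per-CPU counters under u64_stats_fetch_begin()/u64_stats_fetch_retry(), which matters on 32-bit kernels where a 64-bit counter can be observed torn mid-update (on 64-bit the seqcount compiles away). A userspace analogue of the retry loop using C11 atomics; a real seqcount writer bumps seq to odd before and even after each update:

	#include <stdatomic.h>
	#include <stdint.h>

	struct pkt_stat {
		atomic_uint seq;
		_Atomic uint64_t packets;
	};

	static uint64_t pkt_stat_read(struct pkt_stat *s)
	{
		unsigned int start;
		uint64_t packets;

		do {
			start = atomic_load_explicit(&s->seq, memory_order_acquire);
			packets = atomic_load_explicit(&s->packets,
						       memory_order_relaxed);
		} while ((start & 1) ||		/* writer mid-update: retry */
			 atomic_load_explicit(&s->seq,
					      memory_order_acquire) != start);
		return packets;
	}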
713 nhg = rtnl_dereference(nh->nh_grp); in nh_notifier_grp_hw_stats_init()
715 info->id = nh->id; in nh_notifier_grp_hw_stats_init()
716 info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS; in nh_notifier_grp_hw_stats_init()
717 info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats, in nh_notifier_grp_hw_stats_init()
718 stats, nhg->num_nh), in nh_notifier_grp_hw_stats_init()
720 if (!info->nh_grp_hw_stats) in nh_notifier_grp_hw_stats_init()
721 return -ENOMEM; in nh_notifier_grp_hw_stats_init()
723 info->nh_grp_hw_stats->num_nh = nhg->num_nh; in nh_notifier_grp_hw_stats_init()
724 for (i = 0; i < nhg->num_nh; i++) { in nh_notifier_grp_hw_stats_init()
725 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in nh_notifier_grp_hw_stats_init()
727 info->nh_grp_hw_stats->stats[i].id = nhge->nh->id; in nh_notifier_grp_hw_stats_init()
735 kfree(info->nh_grp_hw_stats); in nh_notifier_grp_hw_stats_fini()
742 info->hw_stats_used = true; in nh_grp_hw_stats_report_delta()
743 info->stats[nh_idx].packets += delta_packets; in nh_grp_hw_stats_report_delta()
754 nhg = rtnl_dereference(nh->nh_grp); in nh_grp_hw_stats_apply_update()
756 for (i = 0; i < nhg->num_nh; i++) { in nh_grp_hw_stats_apply_update()
757 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in nh_grp_hw_stats_apply_update()
759 nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets; in nh_grp_hw_stats_apply_update()
766 .net = nh->net, in nh_grp_hw_stats_update()
768 struct net *net = nh->net; in nh_grp_hw_stats_update()
780 err = blocking_notifier_call_chain(&net->nexthop.notifier_chain, in nh_grp_hw_stats_update()
788 *hw_stats_used = info.nh_grp_hw_stats->hw_stats_used; in nh_grp_hw_stats_update()
798 struct nlattr *nest; in nla_put_nh_group_stats_entry() local
803 nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY); in nla_put_nh_group_stats_entry()
804 if (!nest) in nla_put_nh_group_stats_entry()
805 return -EMSGSIZE; in nla_put_nh_group_stats_entry()
807 if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) || in nla_put_nh_group_stats_entry()
809 packets + nhge->packets_hw)) in nla_put_nh_group_stats_entry()
814 nhge->packets_hw)) in nla_put_nh_group_stats_entry()
817 nla_nest_end(skb, nest); in nla_put_nh_group_stats_entry()
821 nla_nest_cancel(skb, nest); in nla_put_nh_group_stats_entry()
822 return -EMSGSIZE; in nla_put_nh_group_stats_entry()
828 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); in nla_put_nh_group_stats()
829 struct nlattr *nest; in nla_put_nh_group_stats() local
834 if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats)) in nla_put_nh_group_stats()
838 nhg->hw_stats) { in nla_put_nh_group_stats()
847 nest = nla_nest_start(skb, NHA_GROUP_STATS); in nla_put_nh_group_stats()
848 if (!nest) in nla_put_nh_group_stats()
851 for (i = 0; i < nhg->num_nh; i++) in nla_put_nh_group_stats()
852 if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i], in nla_put_nh_group_stats()
856 nla_nest_end(skb, nest); in nla_put_nh_group_stats()
860 nla_nest_cancel(skb, nest); in nla_put_nh_group_stats()
862 err = -EMSGSIZE; in nla_put_nh_group_stats()
870 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); in nla_put_nh_group()
872 size_t len = nhg->num_nh * sizeof(*p); in nla_put_nh_group()
880 if (nhg->hash_threshold) in nla_put_nh_group()
882 else if (nhg->resilient) in nla_put_nh_group()
893 for (i = 0; i < nhg->num_nh; ++i) { in nla_put_nh_group()
894 weight = nhg->nh_entries[i].weight - 1; in nla_put_nh_group()
897 .id = nhg->nh_entries[i].nh->id, in nla_put_nh_group()
903 if (nhg->resilient && nla_put_nh_group_res(skb, nhg)) in nla_put_nh_group()
907 (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) || in nla_put_nh_group()
914 return -EMSGSIZE; in nla_put_nh_group()
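
Note the "weight - 1" in nla_put_nh_group() above: each entry's weight goes on the wire biased down by one, so the u8 field in struct nexthop_grp covers weights 1..256 and an all-zero entry still means weight 1 (newer kernels add a weight_high byte on top of this). A trivial userspace encode/decode pair showing just the bias:

	#include <stdint.h>

	static uint8_t weight_to_wire(uint16_t weight)	/* 1..256 */
	{
		return (uint8_t)(weight - 1);
	}

	static uint16_t weight_from_wire(uint8_t wire)
	{
		return (uint16_t)wire + 1;
	}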
929 return -EMSGSIZE; in nh_fill_node()
932 nhm->nh_family = AF_UNSPEC; in nh_fill_node()
933 nhm->nh_flags = nh->nh_flags; in nh_fill_node()
934 nhm->nh_protocol = nh->protocol; in nh_fill_node()
935 nhm->nh_scope = 0; in nh_fill_node()
936 nhm->resvd = 0; in nh_fill_node()
938 if (nla_put_u32(skb, NHA_ID, nh->id)) in nh_fill_node()
941 if (nh->is_group) { in nh_fill_node()
942 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); in nh_fill_node()
945 if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB)) in nh_fill_node()
953 nhi = rtnl_dereference(nh->nh_info); in nh_fill_node()
954 nhm->nh_family = nhi->family; in nh_fill_node()
955 if (nhi->reject_nh) { in nh_fill_node()
959 } else if (nhi->fdb_nh) { in nh_fill_node()
965 dev = nhi->fib_nhc.nhc_dev; in nh_fill_node()
966 if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex)) in nh_fill_node()
970 nhm->nh_scope = nhi->fib_nhc.nhc_scope; in nh_fill_node()
971 switch (nhi->family) { in nh_fill_node()
973 fib_nh = &nhi->fib_nh; in nh_fill_node()
974 if (fib_nh->fib_nh_gw_family && in nh_fill_node()
975 nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4)) in nh_fill_node()
980 fib6_nh = &nhi->fib6_nh; in nh_fill_node()
981 if (fib6_nh->fib_nh_gw_family && in nh_fill_node()
982 nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6)) in nh_fill_node()
987 if (nhi->fib_nhc.nhc_lwtstate && in nh_fill_node()
988 lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate, in nh_fill_node()
998 return -EMSGSIZE; in nh_fill_node()
1012 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); in nh_nlmsg_size_grp()
1013 size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh; in nh_nlmsg_size_grp()
1017 if (nhg->resilient) in nh_nlmsg_size_grp()
1025 struct nh_info *nhi = rtnl_dereference(nh->nh_info); in nh_nlmsg_size_single()
1033 switch (nhi->family) { in nh_nlmsg_size_single()
1035 if (nhi->fib_nh.fib_nh_gw_family) in nh_nlmsg_size_single()
1041 if (nhi->fib6_nh.fib_nh_gw_family) in nh_nlmsg_size_single()
1046 if (nhi->fib_nhc.nhc_lwtstate) { in nh_nlmsg_size_single()
1047 sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate); in nh_nlmsg_size_single()
1060 if (nh->is_group) in nh_nlmsg_size()
1072 unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0; in nexthop_notify()
1073 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; in nexthop_notify()
1075 int err = -ENOBUFS; in nexthop_notify()
1081 err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0); in nexthop_notify()
1083 /* -EMSGSIZE implies BUG in nh_nlmsg_size() */ in nexthop_notify()
1084 WARN_ON(err == -EMSGSIZE); in nexthop_notify()
1089 rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP, in nexthop_notify()
1090 info->nlh, gfp_any()); in nexthop_notify()
1093 rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err); in nexthop_notify()
1098 return (unsigned long)atomic_long_read(&bucket->used_time); in nh_res_bucket_used_time()
1109 if (time == bucket->migrated_time) in nh_res_bucket_idle_point()
1112 return time + res_table->idle_timer; in nh_res_bucket_idle_point()
1118 return res_table->unbalanced_since + res_table->unbalanced_timer; in nh_res_table_unb_point()
1126 atomic_long_set(&bucket->used_time, (long)now); in nh_res_bucket_set_idle()
1127 bucket->migrated_time = now; in nh_res_bucket_set_idle()
1132 atomic_long_set(&bucket->used_time, (long)jiffies); in nh_res_bucket_set_busy()
1139 return jiffies_delta_to_clock_t(jiffies - used_time); in nh_res_bucket_idle_time()
1148 struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry); in nh_fill_res_bucket()
1150 struct nlattr *nest; in nh_fill_res_bucket() local
1155 return -EMSGSIZE; in nh_fill_res_bucket()
1158 nhm->nh_family = AF_UNSPEC; in nh_fill_res_bucket()
1159 nhm->nh_flags = bucket->nh_flags; in nh_fill_res_bucket()
1160 nhm->nh_protocol = nh->protocol; in nh_fill_res_bucket()
1161 nhm->nh_scope = 0; in nh_fill_res_bucket()
1162 nhm->resvd = 0; in nh_fill_res_bucket()
1164 if (nla_put_u32(skb, NHA_ID, nh->id)) in nh_fill_res_bucket()
1167 nest = nla_nest_start(skb, NHA_RES_BUCKET); in nh_fill_res_bucket()
1168 if (!nest) in nh_fill_res_bucket()
1172 nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) || in nh_fill_res_bucket()
1178 nla_nest_end(skb, nest); in nh_fill_res_bucket()
1183 nla_nest_cancel(skb, nest); in nh_fill_res_bucket()
1186 return -EMSGSIZE; in nh_fill_res_bucket()
1192 struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index]; in nexthop_bucket_notify()
1193 struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry); in nexthop_bucket_notify()
1194 struct nexthop *nh = nhge->nh_parent; in nexthop_bucket_notify()
1196 int err = -ENOBUFS; in nexthop_bucket_notify()
1210 rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL); in nexthop_bucket_notify()
1213 rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err); in nexthop_bucket_notify()
1219 if (nh->is_group) { in valid_group_nh()
1220 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); in valid_group_nh()
1223 if (nhg->hash_threshold) { in valid_group_nh()
1225 "Hash-threshold group can not be a nexthop within a group"); in valid_group_nh()
1228 if (nhg->resilient) { in valid_group_nh()
1233 *is_fdb = nhg->fdb_nh; in valid_group_nh()
1235 struct nh_info *nhi = rtnl_dereference(nh->nh_info); in valid_group_nh()
1237 if (nhi->reject_nh && npaths > 1) { in valid_group_nh()
1242 *is_fdb = nhi->fdb_nh; in valid_group_nh()
1253 nhi = rtnl_dereference(nh->nh_info); in nh_check_attr_fdb_group()
1255 if (!nhi->fdb_nh) { in nh_check_attr_fdb_group()
1257 return -EINVAL; in nh_check_attr_fdb_group()
1261 *nh_family = nhi->family; in nh_check_attr_fdb_group()
1262 } else if (*nh_family != nhi->family) { in nh_check_attr_fdb_group()
1264 return -EINVAL; in nh_check_attr_fdb_group()
1280 if (!len || len & (sizeof(struct nexthop_grp) - 1)) { in nh_check_attr_group()
1283 return -EINVAL; in nh_check_attr_group()
1293 return -EINVAL; in nh_check_attr_group()
1300 return -EINVAL; in nh_check_attr_group()
1305 return -EINVAL; in nh_check_attr_group()
1320 return -EINVAL; in nh_check_attr_group()
1323 return -EINVAL; in nh_check_attr_group()
1326 return -EINVAL; in nh_check_attr_group()
1330 return -EINVAL; in nh_check_attr_group()
1347 return -EINVAL; in nh_check_attr_group()
1360 n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6); in ipv6_good_nh()
1362 state = READ_ONCE(n->nud_state); in ipv6_good_nh()
1376 n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev, in ipv4_good_nh()
1377 (__force u32)nh->fib_nh_gw4); in ipv4_good_nh()
1379 state = READ_ONCE(n->nud_state); in ipv4_good_nh()
1388 struct nh_info *nhi = rcu_dereference(nh->nh_info); in nexthop_is_good_nh()
1390 switch (nhi->family) { in nexthop_is_good_nh()
1392 return ipv4_good_nh(&nhi->fib_nh); in nexthop_is_good_nh()
1394 return ipv6_good_nh(&nhi->fib6_nh); in nexthop_is_good_nh()
1404 for (i = 0; i < nhg->num_nh; i++) { in nexthop_select_path_fdb()
1405 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in nexthop_select_path_fdb()
1407 if (hash > atomic_read(&nhge->hthr.upper_bound)) in nexthop_select_path_fdb()
1411 return nhge->nh; in nexthop_select_path_fdb()
1423 if (nhg->fdb_nh) in nexthop_select_path_hthr()
1426 for (i = 0; i < nhg->num_nh; ++i) { in nexthop_select_path_hthr()
1427 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in nexthop_select_path_hthr()
1432 if (!nexthop_is_good_nh(nhge->nh)) in nexthop_select_path_hthr()
1438 if (hash > atomic_read(&nhge->hthr.upper_bound)) in nexthop_select_path_hthr()
1442 return nhge->nh; in nexthop_select_path_hthr()
1446 nhge0 = &nhg->nh_entries[0]; in nexthop_select_path_hthr()
1448 return nhge0->nh; in nexthop_select_path_hthr()
1453 struct nh_res_table *res_table = rcu_dereference(nhg->res_table); in nexthop_select_path_res()
1454 u16 bucket_index = hash % res_table->num_nh_buckets; in nexthop_select_path_res()
1458 /* nexthop_select_path() is expected to return a non-NULL value, so in nexthop_select_path_res()
1461 bucket = &res_table->nh_buckets[bucket_index]; in nexthop_select_path_res()
1463 nhge = rcu_dereference(bucket->nh_entry); in nexthop_select_path_res()
1465 return nhge->nh; in nexthop_select_path_res()
1472 if (!nh->is_group) in nexthop_select_path()
1475 nhg = rcu_dereference(nh->nh_grp); in nexthop_select_path()
1476 if (nhg->hash_threshold) in nexthop_select_path()
1478 else if (nhg->resilient) in nexthop_select_path()
1493 if (nh->is_group) { in nexthop_for_each_fib6_nh()
1497 nhg = rcu_dereference_rtnl(nh->nh_grp); in nexthop_for_each_fib6_nh()
1498 for (i = 0; i < nhg->num_nh; i++) { in nexthop_for_each_fib6_nh()
1499 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in nexthop_for_each_fib6_nh()
1501 nhi = rcu_dereference_rtnl(nhge->nh->nh_info); in nexthop_for_each_fib6_nh()
1502 err = cb(&nhi->fib6_nh, arg); in nexthop_for_each_fib6_nh()
1507 nhi = rcu_dereference_rtnl(nh->nh_info); in nexthop_for_each_fib6_nh()
1508 err = cb(&nhi->fib6_nh, arg); in nexthop_for_each_fib6_nh()
1522 return -EINVAL; in check_src_addr()
1539 if (cfg && check_src_addr(&cfg->fc_src, extack) < 0) in fib6_check_nexthop()
1540 return -EINVAL; in fib6_check_nexthop()
1542 if (nh->is_group) { in fib6_check_nexthop()
1545 nhg = rtnl_dereference(nh->nh_grp); in fib6_check_nexthop()
1546 if (nhg->has_v4) in fib6_check_nexthop()
1548 is_fdb_nh = nhg->fdb_nh; in fib6_check_nexthop()
1550 nhi = rtnl_dereference(nh->nh_info); in fib6_check_nexthop()
1551 if (nhi->family == AF_INET) in fib6_check_nexthop()
1553 is_fdb_nh = nhi->fdb_nh; in fib6_check_nexthop()
1558 return -EINVAL; in fib6_check_nexthop()
1564 return -EINVAL; in fib6_check_nexthop()
1576 if (list_empty(&old->f6i_list)) in fib6_check_nh_list()
1579 list_for_each_entry(f6i, &old->f6i_list, nh_list) { in fib6_check_nh_list()
1580 if (check_src_addr(&f6i->fib6_src.addr, extack) < 0) in fib6_check_nh_list()
1581 return -EINVAL; in fib6_check_nh_list()
1590 if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) { in nexthop_check_scope()
1593 return -EINVAL; in nexthop_check_scope()
1596 if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) { in nexthop_check_scope()
1598 return -EINVAL; in nexthop_check_scope()
1614 if (nh->is_group) { in fib_check_nexthop()
1617 nhg = rtnl_dereference(nh->nh_grp); in fib_check_nexthop()
1618 if (nhg->fdb_nh) { in fib_check_nexthop()
1620 err = -EINVAL; in fib_check_nexthop()
1626 err = -EINVAL; in fib_check_nexthop()
1631 nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info); in fib_check_nexthop()
1634 nhi = rtnl_dereference(nh->nh_info); in fib_check_nexthop()
1635 if (nhi->fdb_nh) { in fib_check_nexthop()
1637 err = -EINVAL; in fib_check_nexthop()
1652 list_for_each_entry(fi, &old->fi_list, nh_list) { in fib_check_nh_list()
1655 err = fib_check_nexthop(new, fi->fib_scope, extack); in fib_check_nh_list()
1664 return nhge->res.count_buckets == nhge->res.wants_buckets; in nh_res_nhge_is_balanced()
1669 return nhge->res.count_buckets > nhge->res.wants_buckets; in nh_res_nhge_is_ow()
1674 return nhge->res.count_buckets < nhge->res.wants_buckets; in nh_res_nhge_is_uw()
1679 return list_empty(&res_table->uw_nh_entries); in nh_res_table_is_balanced()
1686 if (bucket->occupied) { in nh_res_bucket_unset_nh()
1687 nhge = nh_res_dereference(bucket->nh_entry); in nh_res_bucket_unset_nh()
1688 nhge->res.count_buckets--; in nh_res_bucket_unset_nh()
1689 bucket->occupied = false; in nh_res_bucket_unset_nh()
1698 bucket->occupied = true; in nh_res_bucket_set_nh()
1699 rcu_assign_pointer(bucket->nh_entry, nhge); in nh_res_bucket_set_nh()
1700 nhge->res.count_buckets++; in nh_res_bucket_set_nh()
1711 if (!bucket->occupied) { in nh_res_bucket_should_migrate()
1719 nhge = nh_res_dereference(bucket->nh_entry); in nh_res_bucket_should_migrate()
1740 if (res_table->unbalanced_timer) { in nh_res_bucket_should_migrate()
1765 struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index]; in nh_res_bucket_migrate()
1770 new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries, in nh_res_bucket_migrate()
1784 old_nhge = nh_res_dereference(bucket->nh_entry); in nh_res_bucket_migrate()
1785 err = call_nexthop_res_bucket_notifiers(res_table->net, in nh_res_bucket_migrate()
1786 res_table->nhg_id, in nh_res_bucket_migrate()
1788 old_nhge->nh, in nh_res_bucket_migrate()
1789 new_nhge->nh, &extack); in nh_res_bucket_migrate()
1799 bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP); in nh_res_bucket_migrate()
1810 list_del(&new_nhge->res.uw_nh_entry); in nh_res_bucket_migrate()
1830 if (res_table->unbalanced_timer) in nh_res_table_upkeep()
1831 deadline = now + res_table->unbalanced_timer; in nh_res_table_upkeep()
1833 deadline = now + res_table->idle_timer; in nh_res_table_upkeep()
1835 for (i = 0; i < res_table->num_nh_buckets; i++) { in nh_res_table_upkeep()
1836 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; in nh_res_table_upkeep()
1873 &res_table->upkeep_dw, deadline - now); in nh_res_table_upkeep()
1888 cancel_delayed_work_sync(&res_table->upkeep_dw); in nh_res_table_cancel_upkeep()
1899 INIT_LIST_HEAD(&res_table->uw_nh_entries); in nh_res_group_rebalance()
1901 for (i = 0; i < nhg->num_nh; ++i) in nh_res_group_rebalance()
1902 total += nhg->nh_entries[i].weight; in nh_res_group_rebalance()
1904 for (i = 0; i < nhg->num_nh; ++i) { in nh_res_group_rebalance()
1905 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in nh_res_group_rebalance()
1909 w += nhge->weight; in nh_res_group_rebalance()
1910 btw = ((u64)res_table->num_nh_buckets) * w; in nh_res_group_rebalance()
1912 nhge->res.wants_buckets = upper_bound - prev_upper_bound; in nh_res_group_rebalance()
1916 if (list_empty(&res_table->uw_nh_entries)) in nh_res_group_rebalance()
1917 res_table->unbalanced_since = jiffies; in nh_res_group_rebalance()
1918 list_add(&nhge->res.uw_nh_entry, in nh_res_group_rebalance()
1919 &res_table->uw_nh_entries); in nh_res_group_rebalance()
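
nh_res_group_rebalance() above turns weights into per-entry bucket quotas: it scales the cumulative weight w by the bucket count and takes successive differences, so the quotas sum exactly to num_nh_buckets. The same arithmetic as standalone C (names shortened; the kernel uses div_u64() for the division):

	#include <stdint.h>

	static void res_rebalance(const uint16_t *weight, uint16_t *wants,
				  int n, uint16_t num_buckets)
	{
		uint64_t total = 0, w = 0;
		uint16_t prev_upper = 0;
		int i;

		for (i = 0; i < n; i++)
			total += weight[i];

		for (i = 0; i < n; i++) {
			w += weight[i];
			uint16_t upper = ((uint64_t)num_buckets * w) / total;

			wants[i] = upper - prev_upper;	/* successive differences */
			prev_upper = upper;
		}
		/* w == total on exit, so the last upper is num_buckets and
		 * the quotas partition the bucket space exactly. */
	}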
1933 for (i = 0; i < res_table->num_nh_buckets; i++) { in nh_res_table_migrate_buckets()
1934 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; in nh_res_table_migrate_buckets()
1935 u32 id = rtnl_dereference(bucket->nh_entry)->nh->id; in nh_res_table_migrate_buckets()
1939 for (j = 0; j < nhg->num_nh; j++) { in nh_res_table_migrate_buckets()
1940 struct nh_grp_entry *nhge = &nhg->nh_entries[j]; in nh_res_table_migrate_buckets()
1942 if (nhge->nh->id == id) { in nh_res_table_migrate_buckets()
1961 * with oldg->res_table. in replace_nexthop_grp_res()
1963 struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table); in replace_nexthop_grp_res()
1964 unsigned long prev_unbalanced_since = old_res_table->unbalanced_since; in replace_nexthop_grp_res()
1965 bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries); in replace_nexthop_grp_res()
1970 if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries)) in replace_nexthop_grp_res()
1971 old_res_table->unbalanced_since = prev_unbalanced_since; in replace_nexthop_grp_res()
1981 for (i = 0; i < nhg->num_nh; ++i) in nh_hthr_group_rebalance()
1982 total += nhg->nh_entries[i].weight; in nh_hthr_group_rebalance()
1984 for (i = 0; i < nhg->num_nh; ++i) { in nh_hthr_group_rebalance()
1985 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in nh_hthr_group_rebalance()
1988 w += nhge->weight; in nh_hthr_group_rebalance()
1989 upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1; in nh_hthr_group_rebalance()
1990 atomic_set(&nhge->hthr.upper_bound, upper_bound); in nh_hthr_group_rebalance()
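
nh_hthr_group_rebalance() above maps cumulative weights onto the 31-bit hash space: each entry's upper bound is round((w << 31) / total) - 1, and nexthop_select_path_hthr() picks the first entry whose bound is >= the packet hash. Worked in plain C, assuming hash values below 2^31 as the kernel's multipath hash provides:

	#include <stdint.h>

	static void hthr_bounds(const uint32_t *weight, int32_t *upper, int n)
	{
		uint64_t total = 0, w = 0;
		int i;

		for (i = 0; i < n; i++)
			total += weight[i];
		for (i = 0; i < n; i++) {
			w += weight[i];
			/* DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1 */
			upper[i] = (int32_t)(((w << 31) + total / 2) / total) - 1;
		}
	}

	static int hthr_select(const int32_t *upper, int n, uint32_t hash)
	{
		int i;

		for (i = 0; i < n; i++)
			if ((int32_t)hash <= upper[i])
				return i;
		return n - 1;	/* last bound is 2^31 - 1: unreachable */
	}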
1998 struct nexthop *nhp = nhge->nh_parent; in remove_nh_grp_entry()
2000 struct nexthop *nh = nhge->nh; in remove_nh_grp_entry()
2006 nhg = rtnl_dereference(nhp->nh_grp); in remove_nh_grp_entry()
2007 newg = nhg->spare; in remove_nh_grp_entry()
2010 if (nhg->num_nh == 1) { in remove_nh_grp_entry()
2015 newg->has_v4 = false; in remove_nh_grp_entry()
2016 newg->is_multipath = nhg->is_multipath; in remove_nh_grp_entry()
2017 newg->hash_threshold = nhg->hash_threshold; in remove_nh_grp_entry()
2018 newg->resilient = nhg->resilient; in remove_nh_grp_entry()
2019 newg->fdb_nh = nhg->fdb_nh; in remove_nh_grp_entry()
2020 newg->num_nh = nhg->num_nh; in remove_nh_grp_entry()
2023 nhges = nhg->nh_entries; in remove_nh_grp_entry()
2024 new_nhges = newg->nh_entries; in remove_nh_grp_entry()
2025 for (i = 0, j = 0; i < nhg->num_nh; ++i) { in remove_nh_grp_entry()
2029 if (nhg->nh_entries[i].nh == nh) { in remove_nh_grp_entry()
2030 newg->num_nh--; in remove_nh_grp_entry()
2034 nhi = rtnl_dereference(nhges[i].nh->nh_info); in remove_nh_grp_entry()
2035 if (nhi->family == AF_INET) in remove_nh_grp_entry()
2036 newg->has_v4 = true; in remove_nh_grp_entry()
2043 list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list); in remove_nh_grp_entry()
2047 if (newg->hash_threshold) in remove_nh_grp_entry()
2049 else if (newg->resilient) in remove_nh_grp_entry()
2052 rcu_assign_pointer(nhp->nh_grp, newg); in remove_nh_grp_entry()
2054 list_del(&nhge->nh_list); in remove_nh_grp_entry()
2055 free_percpu(nhge->stats); in remove_nh_grp_entry()
2056 nexthop_put(nhge->nh); in remove_nh_grp_entry()
2061 if (newg->hash_threshold) { in remove_nh_grp_entry()
2077 list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) in remove_nexthop_from_groups()
2080 /* make sure all see the newly published array before releasing rtnl */ in remove_nexthop_from_groups()
2086 struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); in remove_nexthop_group()
2088 int i, num_nh = nhg->num_nh; in remove_nexthop_group()
2091 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in remove_nexthop_group()
2093 if (WARN_ON(!nhge->nh)) in remove_nexthop_group()
2096 list_del_init(&nhge->nh_list); in remove_nexthop_group()
2099 if (nhg->resilient) { in remove_nexthop_group()
2100 res_table = rtnl_dereference(nhg->res_table); in remove_nexthop_group()
2112 list_for_each_entry(fi, &nh->fi_list, nh_list) { in __remove_nexthop_fib()
2113 fi->fib_flags |= RTNH_F_DEAD; in __remove_nexthop_fib()
2120 list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) { in __remove_nexthop_fib()
2123 ipv6_stub->ip6_del_rt(net, f6i, in __remove_nexthop_fib()
2124 !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode)); in __remove_nexthop_fib()
2133 if (nh->is_group) { in __remove_nexthop()
2138 nhi = rtnl_dereference(nh->nh_info); in __remove_nexthop()
2139 if (nhi->fib_nhc.nhc_dev) in __remove_nexthop()
2140 hlist_del(&nhi->dev_hash); in __remove_nexthop()
2152 rb_erase(&nh->rb_node, &net->nexthop.rb_root); in remove_nexthop()
2173 if (!list_empty(&nh->fi_list)) in nh_rt_cache_flush()
2176 list_for_each_entry(f6i, &nh->f6i_list, nh_list) in nh_rt_cache_flush()
2177 ipv6_stub->fib6_update_sernum(net, f6i); in nh_rt_cache_flush()
2182 if (!replaced_nh->is_group) in nh_rt_cache_flush()
2185 nhg = rtnl_dereference(replaced_nh->nh_grp); in nh_rt_cache_flush()
2186 for (i = 0; i < nhg->num_nh; i++) { in nh_rt_cache_flush()
2187 struct nh_grp_entry *nhge = &nhg->nh_entries[i]; in nh_rt_cache_flush()
2188 struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info); in nh_rt_cache_flush()
2190 if (nhi->family == AF_INET6) in nh_rt_cache_flush()
2191 ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh); in nh_rt_cache_flush()
2205 if (!new->is_group) { in replace_nexthop_grp()
2207 return -EINVAL; in replace_nexthop_grp()
2210 oldg = rtnl_dereference(old->nh_grp); in replace_nexthop_grp()
2211 newg = rtnl_dereference(new->nh_grp); in replace_nexthop_grp()
2213 if (newg->hash_threshold != oldg->hash_threshold) { in replace_nexthop_grp()
2215 return -EINVAL; in replace_nexthop_grp()
2218 if (newg->hash_threshold) { in replace_nexthop_grp()
2223 } else if (newg->resilient) { in replace_nexthop_grp()
2224 new_res_table = rtnl_dereference(newg->res_table); in replace_nexthop_grp()
2225 old_res_table = rtnl_dereference(oldg->res_table); in replace_nexthop_grp()
2230 if (cfg->nh_grp_res_has_num_buckets && in replace_nexthop_grp()
2231 cfg->nh_grp_res_num_buckets != in replace_nexthop_grp()
2232 old_res_table->num_nh_buckets) { in replace_nexthop_grp()
2234 return -EINVAL; in replace_nexthop_grp()
2237 /* Emit a pre-replace notification so that listeners could veto in replace_nexthop_grp()
2247 if (cfg->nh_grp_res_has_idle_timer) in replace_nexthop_grp()
2248 old_res_table->idle_timer = cfg->nh_grp_res_idle_timer; in replace_nexthop_grp()
2249 if (cfg->nh_grp_res_has_unbalanced_timer) in replace_nexthop_grp()
2250 old_res_table->unbalanced_timer = in replace_nexthop_grp()
2251 cfg->nh_grp_res_unbalanced_timer; in replace_nexthop_grp()
2256 rcu_assign_pointer(newg->res_table, old_res_table); in replace_nexthop_grp()
2257 rcu_assign_pointer(newg->spare->res_table, old_res_table); in replace_nexthop_grp()
2260 /* update parents - used by nexthop code for cleanup */ in replace_nexthop_grp()
2261 for (i = 0; i < newg->num_nh; i++) in replace_nexthop_grp()
2262 newg->nh_entries[i].nh_parent = old; in replace_nexthop_grp()
2264 rcu_assign_pointer(old->nh_grp, newg); in replace_nexthop_grp()
2269 if (newg->resilient) { in replace_nexthop_grp()
2270 rcu_assign_pointer(oldg->res_table, tmp_table); in replace_nexthop_grp()
2271 rcu_assign_pointer(oldg->spare->res_table, tmp_table); in replace_nexthop_grp()
2274 for (i = 0; i < oldg->num_nh; i++) in replace_nexthop_grp()
2275 oldg->nh_entries[i].nh_parent = new; in replace_nexthop_grp()
2277 rcu_assign_pointer(new->nh_grp, oldg); in replace_nexthop_grp()
2288 nhges = nhg->nh_entries; in nh_group_v4_update()
2289 for (i = 0; i < nhg->num_nh; i++) { in nh_group_v4_update()
2292 nhi = rtnl_dereference(nhges[i].nh->nh_info); in nh_group_v4_update()
2293 if (nhi->family == AF_INET) in nh_group_v4_update()
2296 nhg->has_v4 = has_v4; in nh_group_v4_update()
2306 u32 nhg_id = res_table->nhg_id; in replace_nexthop_single_notify_res()
2310 for (i = 0; i < res_table->num_nh_buckets; i++) { in replace_nexthop_single_notify_res()
2311 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; in replace_nexthop_single_notify_res()
2314 nhge = rtnl_dereference(bucket->nh_entry); in replace_nexthop_single_notify_res()
2315 if (nhge->nh == old) { in replace_nexthop_single_notify_res()
2328 while (i-- > 0) { in replace_nexthop_single_notify_res()
2329 struct nh_res_bucket *bucket = &res_table->nh_buckets[i]; in replace_nexthop_single_notify_res()
2332 nhge = rtnl_dereference(bucket->nh_entry); in replace_nexthop_single_notify_res()
2333 if (nhge->nh == old) in replace_nexthop_single_notify_res()
2348 struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp); in replace_nexthop_single_notify()
2351 if (nhg->hash_threshold) { in replace_nexthop_single_notify()
2354 } else if (nhg->resilient) { in replace_nexthop_single_notify()
2355 res_table = rtnl_dereference(nhg->res_table); in replace_nexthop_single_notify()
2361 return -EINVAL; in replace_nexthop_single_notify()
2373 if (new->is_group) { in replace_nexthop_single()
2375 return -EINVAL; in replace_nexthop_single()
2382 /* Hardware flags were set on 'old' as 'new' is not in the red-black in replace_nexthop_single()
2385 new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP); in replace_nexthop_single()
2387 oldi = rtnl_dereference(old->nh_info); in replace_nexthop_single()
2388 newi = rtnl_dereference(new->nh_info); in replace_nexthop_single()
2390 newi->nh_parent = old; in replace_nexthop_single()
2391 oldi->nh_parent = new; in replace_nexthop_single()
2393 old_protocol = old->protocol; in replace_nexthop_single()
2394 old_nh_flags = old->nh_flags; in replace_nexthop_single()
2396 old->protocol = new->protocol; in replace_nexthop_single()
2397 old->nh_flags = new->nh_flags; in replace_nexthop_single()
2399 rcu_assign_pointer(old->nh_info, newi); in replace_nexthop_single()
2400 rcu_assign_pointer(new->nh_info, oldi); in replace_nexthop_single()
2403 list_for_each_entry(nhge, &old->grp_list, nh_list) { in replace_nexthop_single()
2404 struct nexthop *nhp = nhge->nh_parent; in replace_nexthop_single()
2415 if (oldi->family == AF_INET && newi->family == AF_INET6) { in replace_nexthop_single()
2416 list_for_each_entry(nhge, &old->grp_list, nh_list) { in replace_nexthop_single()
2417 struct nexthop *nhp = nhge->nh_parent; in replace_nexthop_single()
2420 nhg = rtnl_dereference(nhp->nh_grp); in replace_nexthop_single()
2428 rcu_assign_pointer(new->nh_info, newi); in replace_nexthop_single()
2429 rcu_assign_pointer(old->nh_info, oldi); in replace_nexthop_single()
2430 old->nh_flags = old_nh_flags; in replace_nexthop_single()
2431 old->protocol = old_protocol; in replace_nexthop_single()
2432 oldi->nh_parent = old; in replace_nexthop_single()
2433 newi->nh_parent = new; in replace_nexthop_single()
2434 list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) { in replace_nexthop_single()
2435 struct nexthop *nhp = nhge->nh_parent; in replace_nexthop_single()
2448 if (!list_empty(&nh->fi_list)) { in __nexthop_replace_notify()
2455 list_for_each_entry(fi, &nh->fi_list, nh_list) in __nexthop_replace_notify()
2456 fi->nh_updated = true; in __nexthop_replace_notify()
2460 list_for_each_entry(fi, &nh->fi_list, nh_list) in __nexthop_replace_notify()
2461 fi->nh_updated = false; in __nexthop_replace_notify()
2464 list_for_each_entry(f6i, &nh->f6i_list, nh_list) in __nexthop_replace_notify()
2465 ipv6_stub->fib6_rt_update(net, f6i, info); in __nexthop_replace_notify()
2479 list_for_each_entry(nhge, &nh->grp_list, nh_list) in nexthop_replace_notify()
2480 __nexthop_replace_notify(net, nhge->nh_parent, info); in nexthop_replace_notify()
2502 if (!new->is_group) { in replace_nexthop()
2503 struct nh_info *nhi = rtnl_dereference(new->nh_info); in replace_nexthop()
2505 new_is_reject = nhi->reject_nh; in replace_nexthop()
2508 list_for_each_entry(nhge, &old->grp_list, nh_list) { in replace_nexthop()
2513 nexthop_num_path(nhge->nh_parent) > 1) { in replace_nexthop()
2515 return -EINVAL; in replace_nexthop()
2518 err = fib_check_nh_list(nhge->nh_parent, new, extack); in replace_nexthop()
2522 err = fib6_check_nh_list(nhge->nh_parent, new, extack); in replace_nexthop()
2527 if (old->is_group) in replace_nexthop()
2547 struct rb_root *root = &net->nexthop.rb_root; in insert_nexthop()
2548 bool replace = !!(cfg->nlflags & NLM_F_REPLACE); in insert_nexthop()
2549 bool create = !!(cfg->nlflags & NLM_F_CREATE); in insert_nexthop()
2550 u32 new_id = new_nh->id; in insert_nexthop()
2552 int rc = -EEXIST; in insert_nexthop()
2554 pp = &root->rb_node; in insert_nexthop()
2565 if (new_id < nh->id) { in insert_nexthop()
2566 pp = &next->rb_left; in insert_nexthop()
2567 } else if (new_id > nh->id) { in insert_nexthop()
2568 pp = &next->rb_right; in insert_nexthop()
2584 rc = -ENOENT; in insert_nexthop()
2588 if (new_nh->is_group) { in insert_nexthop()
2589 struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp); in insert_nexthop()
2592 if (nhg->resilient) { in insert_nexthop()
2593 res_table = rtnl_dereference(nhg->res_table); in insert_nexthop()
2598 if (!cfg->nh_grp_res_has_num_buckets) { in insert_nexthop()
2600 rc = -EINVAL; in insert_nexthop()
2613 rb_link_node_rcu(&new_nh->rb_node, parent, pp); in insert_nexthop()
2614 rb_insert_color(&new_nh->rb_node, root); in insert_nexthop()
2616 /* The initial insertion is a full notification for hash-threshold as in insert_nexthop()
2621 rb_erase(&new_nh->rb_node, &net->nexthop.rb_root); in insert_nexthop()
2626 nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo); in insert_nexthop()
2628 READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode)) in insert_nexthop()
2629 nexthop_replace_notify(net, new_nh, &cfg->nlinfo); in insert_nexthop()
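
insert_nexthop() above is a textbook keyed rb-tree insert: walk from the root comparing IDs to find the link point, then publish with rb_link_node_rcu() (the RCU variant, since lookups are lockless) and rebalance with rb_insert_color(). The bare walk, kernel-style, with hypothetical struct my_node:

	struct my_node { struct rb_node rb; u32 id; };

	static bool my_insert(struct rb_root *root, struct my_node *new)
	{
		struct rb_node **pp = &root->rb_node, *parent = NULL;

		while (*pp) {
			struct my_node *cur = rb_entry(*pp, struct my_node, rb);

			parent = *pp;
			if (new->id < cur->id)
				pp = &(*pp)->rb_left;
			else if (new->id > cur->id)
				pp = &(*pp)->rb_right;
			else
				return false;		/* duplicate ID */
		}
		rb_link_node(&new->rb, parent, pp);	/* _rcu variant if readers are lockless */
		rb_insert_color(&new->rb, root);
		return true;
	}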
2639 unsigned int hash = nh_dev_hashfn(dev->ifindex); in nexthop_flush_dev()
2641 struct hlist_head *head = &net->nexthop.devhash[hash]; in nexthop_flush_dev()
2646 if (nhi->fib_nhc.nhc_dev != dev) in nexthop_flush_dev()
2649 if (nhi->reject_nh && in nexthop_flush_dev()
2653 remove_nexthop(net, nhi->nh_parent, NULL); in nexthop_flush_dev()
2660 struct rb_root *root = &net->nexthop.rb_root; in flush_all_nexthops()
2674 struct nlattr *grps_attr = cfg->nh_grp; in nexthop_create_group()
2683 return ERR_PTR(-EINVAL); in nexthop_create_group()
2687 return ERR_PTR(-ENOMEM); in nexthop_create_group()
2689 nh->is_group = 1; in nexthop_create_group()
2694 return ERR_PTR(-ENOMEM); in nexthop_create_group()
2698 nhg->spare = nexthop_grp_alloc(num_nh); in nexthop_create_group()
2699 if (!nhg->spare) { in nexthop_create_group()
2702 return ERR_PTR(-ENOMEM); in nexthop_create_group()
2704 nhg->spare->spare = nhg; in nexthop_create_group()
2706 for (i = 0; i < nhg->num_nh; ++i) { in nexthop_create_group()
2712 err = -ENOENT; in nexthop_create_group()
2716 nhi = rtnl_dereference(nhe->nh_info); in nexthop_create_group()
2717 if (nhi->family == AF_INET) in nexthop_create_group()
2718 nhg->has_v4 = true; in nexthop_create_group()
2720 nhg->nh_entries[i].stats = in nexthop_create_group()
2722 if (!nhg->nh_entries[i].stats) { in nexthop_create_group()
2723 err = -ENOMEM; in nexthop_create_group()
2727 nhg->nh_entries[i].nh = nhe; in nexthop_create_group()
2728 nhg->nh_entries[i].weight = nexthop_grp_weight(&entry[i]); in nexthop_create_group()
2730 list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list); in nexthop_create_group()
2731 nhg->nh_entries[i].nh_parent = nh; in nexthop_create_group()
2734 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) { in nexthop_create_group()
2735 nhg->hash_threshold = 1; in nexthop_create_group()
2736 nhg->is_multipath = true; in nexthop_create_group()
2737 } else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) { in nexthop_create_group()
2740 res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg); in nexthop_create_group()
2742 err = -ENOMEM; in nexthop_create_group()
2746 rcu_assign_pointer(nhg->spare->res_table, res_table); in nexthop_create_group()
2747 rcu_assign_pointer(nhg->res_table, res_table); in nexthop_create_group()
2748 nhg->resilient = true; in nexthop_create_group()
2749 nhg->is_multipath = true; in nexthop_create_group()
2752 WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1); in nexthop_create_group()
2754 if (nhg->hash_threshold) in nexthop_create_group()
2757 if (cfg->nh_fdb) in nexthop_create_group()
2758 nhg->fdb_nh = 1; in nexthop_create_group()
2760 if (cfg->nh_hw_stats) in nexthop_create_group()
2761 nhg->hw_stats = true; in nexthop_create_group()
2763 rcu_assign_pointer(nh->nh_grp, nhg); in nexthop_create_group()
2768 for (i--; i >= 0; --i) { in nexthop_create_group()
2769 list_del(&nhg->nh_entries[i].nh_list); in nexthop_create_group()
2770 free_percpu(nhg->nh_entries[i].stats); in nexthop_create_group()
2771 nexthop_put(nhg->nh_entries[i].nh); in nexthop_create_group()
2774 kfree(nhg->spare); in nexthop_create_group()
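
The "for (i--; i >= 0; --i)" above is the classic reverse unwind: on failure at iteration i, undo only the iterations that fully completed, skipping the one that failed. A self-contained userspace sketch with stubbed item_init()/item_fini():

	struct item { void *priv; };

	static int item_init(struct item *it) { it->priv = 0; return 0; }	/* stub */
	static void item_fini(struct item *it) { (void)it; }			/* stub */

	static int init_all(struct item *items, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (item_init(&items[i]) < 0)
				goto unwind;
		return 0;

	unwind:
		for (i--; i >= 0; --i)	/* entry i never finished init: skip it */
			item_fini(&items[i]);
		return -1;
	}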
2785 struct fib_nh *fib_nh = &nhi->fib_nh; in nh_create_ipv4()
2787 .fc_oif = cfg->nh_ifindex, in nh_create_ipv4()
2788 .fc_gw4 = cfg->gw.ipv4, in nh_create_ipv4()
2789 .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0, in nh_create_ipv4()
2790 .fc_flags = cfg->nh_flags, in nh_create_ipv4()
2791 .fc_nlinfo = cfg->nlinfo, in nh_create_ipv4()
2792 .fc_encap = cfg->nh_encap, in nh_create_ipv4()
2793 .fc_encap_type = cfg->nh_encap_type, in nh_create_ipv4()
2795 u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN); in nh_create_ipv4()
2804 if (nhi->fdb_nh) in nh_create_ipv4()
2810 nh->nh_flags = fib_nh->fib_nh_flags; in nh_create_ipv4()
2811 fib_info_update_nhc_saddr(net, &fib_nh->nh_common, in nh_create_ipv4()
2812 !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1); in nh_create_ipv4()
2824 struct fib6_nh *fib6_nh = &nhi->fib6_nh; in nh_create_ipv6()
2826 .fc_table = l3mdev_fib_table(cfg->dev), in nh_create_ipv6()
2827 .fc_ifindex = cfg->nh_ifindex, in nh_create_ipv6()
2828 .fc_gateway = cfg->gw.ipv6, in nh_create_ipv6()
2829 .fc_flags = cfg->nh_flags, in nh_create_ipv6()
2830 .fc_nlinfo = cfg->nlinfo, in nh_create_ipv6()
2831 .fc_encap = cfg->nh_encap, in nh_create_ipv6()
2832 .fc_encap_type = cfg->nh_encap_type, in nh_create_ipv6()
2833 .fc_is_fdb = cfg->nh_fdb, in nh_create_ipv6()
2837 if (!ipv6_addr_any(&cfg->gw.ipv6)) in nh_create_ipv6()
2841 err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL, in nh_create_ipv6()
2845 if (err == -EAFNOSUPPORT) in nh_create_ipv6()
2847 ipv6_stub->fib6_nh_release(fib6_nh); in nh_create_ipv6()
2849 nh->nh_flags = fib6_nh->fib_nh_flags; in nh_create_ipv6()
2864 return ERR_PTR(-ENOMEM); in nexthop_create()
2869 return ERR_PTR(-ENOMEM); in nexthop_create()
2872 nh->nh_flags = cfg->nh_flags; in nexthop_create()
2873 nh->net = net; in nexthop_create()
2875 nhi->nh_parent = nh; in nexthop_create()
2876 nhi->family = cfg->nh_family; in nexthop_create()
2877 nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK; in nexthop_create()
2879 if (cfg->nh_fdb) in nexthop_create()
2880 nhi->fdb_nh = 1; in nexthop_create()
2882 if (cfg->nh_blackhole) { in nexthop_create()
2883 nhi->reject_nh = 1; in nexthop_create()
2884 cfg->nh_ifindex = net->loopback_dev->ifindex; in nexthop_create()
2887 switch (cfg->nh_family) { in nexthop_create()
2903 if (!nhi->fdb_nh) in nexthop_create()
2906 rcu_assign_pointer(nh->nh_info, nhi); in nexthop_create()
2918 if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) { in nexthop_add()
2920 return ERR_PTR(-EINVAL); in nexthop_add()
2923 if (!cfg->nh_id) { in nexthop_add()
2924 cfg->nh_id = nh_find_unused_id(net); in nexthop_add()
2925 if (!cfg->nh_id) { in nexthop_add()
2927 return ERR_PTR(-EINVAL); in nexthop_add()
2931 if (cfg->nh_grp) in nexthop_add()
2939 refcount_set(&nh->refcnt, 1); in nexthop_add()
2940 nh->id = cfg->nh_id; in nexthop_add()
2941 nh->protocol = cfg->nh_protocol; in nexthop_add()
2942 nh->net = net; in nexthop_add()
2971 return -EINVAL; in rtm_nh_get_timer()
2987 ARRAY_SIZE(rtm_nh_res_policy_new) - 1, in rtm_to_nh_config_grp_res()
2994 cfg->nh_grp_res_num_buckets = in rtm_to_nh_config_grp_res()
2996 cfg->nh_grp_res_has_num_buckets = true; in rtm_to_nh_config_grp_res()
2997 if (!cfg->nh_grp_res_num_buckets) { in rtm_to_nh_config_grp_res()
2998 NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0"); in rtm_to_nh_config_grp_res()
2999 return -EINVAL; in rtm_to_nh_config_grp_res()
3005 &cfg->nh_grp_res_idle_timer, in rtm_to_nh_config_grp_res()
3006 &cfg->nh_grp_res_has_idle_timer, in rtm_to_nh_config_grp_res()
3013 &cfg->nh_grp_res_unbalanced_timer, in rtm_to_nh_config_grp_res()
3014 &cfg->nh_grp_res_has_unbalanced_timer, in rtm_to_nh_config_grp_res()
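
rtm_to_nh_config_grp_res() above validates the NHA_RES_GROUP payload with nla_parse_nested(), passing ARRAY_SIZE(policy) - 1 as maxtype because policies are arrays indexed by attribute type. A kernel-style sketch of the same shape; the MY_ATTR_* space and my_policy are placeholders, not kernel ABI:

	enum { MY_ATTR_UNSPEC, MY_ATTR_BUCKETS, __MY_ATTR_MAX };
	#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_BUCKETS] = { .type = NLA_U16 },
	};

	static int parse_my_nest(struct nlattr *nest,
				 struct netlink_ext_ack *extack)
	{
		struct nlattr *tb[MY_ATTR_MAX + 1];
		int err;

		err = nla_parse_nested(tb, MY_ATTR_MAX, nest, my_policy, extack);
		if (err < 0)
			return err;

		if (tb[MY_ATTR_BUCKETS] && !nla_get_u16(tb[MY_ATTR_BUCKETS])) {
			NL_SET_ERR_MSG(extack, "bucket count must be non-zero");
			return -EINVAL;
		}
		return 0;
	}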
3027 ARRAY_SIZE(rtm_nh_policy_new) - 1, in rtm_to_nh_config()
3032 err = -EINVAL; in rtm_to_nh_config()
3033 if (nhm->resvd || nhm->nh_scope) { in rtm_to_nh_config()
3037 if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) { in rtm_to_nh_config()
3042 switch (nhm->nh_family) { in rtm_to_nh_config()
3056 cfg->nlflags = nlh->nlmsg_flags; in rtm_to_nh_config()
3057 cfg->nlinfo.portid = NETLINK_CB(skb).portid; in rtm_to_nh_config()
3058 cfg->nlinfo.nlh = nlh; in rtm_to_nh_config()
3059 cfg->nlinfo.nl_net = net; in rtm_to_nh_config()
3061 cfg->nh_family = nhm->nh_family; in rtm_to_nh_config()
3062 cfg->nh_protocol = nhm->nh_protocol; in rtm_to_nh_config()
3063 cfg->nh_flags = nhm->nh_flags; in rtm_to_nh_config()
3066 cfg->nh_id = nla_get_u32(tb[NHA_ID]); in rtm_to_nh_config()
3074 if (nhm->nh_flags) { in rtm_to_nh_config()
3078 cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]); in rtm_to_nh_config()
3082 if (nhm->nh_family != AF_UNSPEC) { in rtm_to_nh_config()
3086 cfg->nh_grp = tb[NHA_GROUP]; in rtm_to_nh_config()
3088 cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH; in rtm_to_nh_config()
3090 cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]); in rtm_to_nh_config()
3092 if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) { in rtm_to_nh_config()
3097 cfg->nh_grp_type, extack); in rtm_to_nh_config()
3101 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) in rtm_to_nh_config()
3106 cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]); in rtm_to_nh_config()
3119 cfg->nh_blackhole = 1; in rtm_to_nh_config()
3124 if (!cfg->nh_fdb && !tb[NHA_OIF]) { in rtm_to_nh_config()
3125 NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops"); in rtm_to_nh_config()
3129 if (!cfg->nh_fdb && tb[NHA_OIF]) { in rtm_to_nh_config()
3130 cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]); in rtm_to_nh_config()
3131 if (cfg->nh_ifindex) in rtm_to_nh_config()
3132 cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex); in rtm_to_nh_config()
3134 if (!cfg->dev) { in rtm_to_nh_config()
3137 } else if (!(cfg->dev->flags & IFF_UP)) { in rtm_to_nh_config()
3139 err = -ENETDOWN; in rtm_to_nh_config()
3141 } else if (!netif_carrier_ok(cfg->dev)) { in rtm_to_nh_config()
3143 err = -ENETDOWN; in rtm_to_nh_config()
3148 err = -EINVAL; in rtm_to_nh_config()
3152 switch (cfg->nh_family) { in rtm_to_nh_config()
3158 cfg->gw.ipv4 = nla_get_be32(gwa); in rtm_to_nh_config()
3165 cfg->gw.ipv6 = nla_get_in6_addr(gwa); in rtm_to_nh_config()
3174 if (cfg->nh_flags & RTNH_F_ONLINK) { in rtm_to_nh_config()
3182 cfg->nh_encap = tb[NHA_ENCAP]; in rtm_to_nh_config()
3189 cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]); in rtm_to_nh_config()
3190 err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack); in rtm_to_nh_config()
3200 NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops"); in rtm_to_nh_config()
3213 struct net *net = sock_net(skb->sk); in rtm_new_nexthop()
3234 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) { in nh_valid_get_del_req()
3236 return -EINVAL; in nh_valid_get_del_req()
3241 return -EINVAL; in nh_valid_get_del_req()
3247 return -EINVAL; in nh_valid_get_del_req()
3261 struct net *net = sock_net(skb->sk); in rtm_del_nexthop()
3272 ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del, in rtm_del_nexthop()
3283 return -ENOENT; in rtm_del_nexthop()
3295 struct net *net = sock_net(in_skb->sk); in rtm_get_nexthop()
3303 ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get, in rtm_get_nexthop()
3312 err = -ENOBUFS; in rtm_get_nexthop()
3317 err = -ENOENT; in rtm_get_nexthop()
3323 nlh->nlmsg_seq, 0, op_flags); in rtm_get_nexthop()
3325 WARN_ON(err == -EMSGSIZE); in rtm_get_nexthop()
3353 if (filter->group_filter && !nh->is_group) in nh_dump_filtered()
3356 if (!filter->dev_idx && !filter->master_idx && !family) in nh_dump_filtered()
3359 if (nh->is_group) in nh_dump_filtered()
3362 nhi = rtnl_dereference(nh->nh_info); in nh_dump_filtered()
3363 if (family && nhi->family != family) in nh_dump_filtered()
3366 dev = nhi->fib_nhc.nhc_dev; in nh_dump_filtered()
3367 if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx)) in nh_dump_filtered()
3370 if (filter->master_idx) { in nh_dump_filtered()
3377 if (!master || master->ifindex != filter->master_idx) in nh_dump_filtered()
3395 return -EINVAL; in __nh_valid_dump_req()
3397 filter->dev_idx = idx; in __nh_valid_dump_req()
3403 return -EINVAL; in __nh_valid_dump_req()
3405 filter->master_idx = idx; in __nh_valid_dump_req()
3407 filter->group_filter = nla_get_flag(tb[NHA_GROUPS]); in __nh_valid_dump_req()
3408 filter->fdb_filter = nla_get_flag(tb[NHA_FDB]); in __nh_valid_dump_req()
3411 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) { in __nh_valid_dump_req()
3413 return -EINVAL; in __nh_valid_dump_req()
3427 ARRAY_SIZE(rtm_nh_policy_dump) - 1, in nh_valid_dump_req()
3428 rtm_nh_policy_dump, cb->extack); in nh_valid_dump_req()
3432 filter->op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0); in nh_valid_dump_req()
3434 return __nh_valid_dump_req(nlh, tb, filter, cb->extack); in nh_valid_dump_req()
3444 struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx; in rtm_dump_nh_ctx()
3446 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); in rtm_dump_nh_ctx()
3463 s_idx = ctx->idx; in rtm_dump_walk_nexthops()
3468 if (nh->id < s_idx) in rtm_dump_walk_nexthops()
3471 ctx->idx = nh->id; in rtm_dump_walk_nexthops()
3483 struct nhmsg *nhm = nlmsg_data(cb->nlh); in rtm_dump_nexthop_cb()
3486 if (nh_dump_filtered(nh, filter, nhm->nh_family)) in rtm_dump_nexthop_cb()
3490 NETLINK_CB(cb->skb).portid, in rtm_dump_nexthop_cb()
3491 cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags); in rtm_dump_nexthop_cb()
3498 struct net *net = sock_net(skb->sk); in rtm_dump_nexthop()
3499 struct rb_root *root = &net->nexthop.rb_root; in rtm_dump_nexthop()
3503 err = nh_valid_dump_req(cb->nlh, &filter, cb); in rtm_dump_nexthop()
3510 cb->seq = net->nexthop.seq; in rtm_dump_nexthop()
3524 return ERR_PTR(-ENOENT); in nexthop_find_group_resilient()
3526 if (!nh->is_group) { in nexthop_find_group_resilient()
3528 return ERR_PTR(-EINVAL); in nexthop_find_group_resilient()
3531 nhg = rtnl_dereference(nh->nh_grp); in nexthop_find_group_resilient()
3532 if (!nhg->resilient) { in nexthop_find_group_resilient()
3534 return ERR_PTR(-EINVAL); in nexthop_find_group_resilient()
3549 return -EINVAL; in nh_valid_dump_nhid()
3568 ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1, in nh_valid_dump_bucket_req()
3573 err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack); in nh_valid_dump_bucket_req()
3578 size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1; in nh_valid_dump_bucket_req()
3583 cb->extack); in nh_valid_dump_bucket_req()
3588 &filter->res_bucket_nh_id, in nh_valid_dump_bucket_req()
3589 cb->extack); in nh_valid_dump_bucket_req()
3594 return __nh_valid_dump_req(nlh, tb, filter, cb->extack); in nh_valid_dump_bucket_req()
3605 struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx; in rtm_dump_res_bucket_ctx()
3607 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); in rtm_dump_res_bucket_ctx()
3621 u32 portid = NETLINK_CB(cb->skb).portid; in rtm_dump_nexthop_bucket_nh()
3622 struct nhmsg *nhm = nlmsg_data(cb->nlh); in rtm_dump_nexthop_bucket_nh()
3628 nhg = rtnl_dereference(nh->nh_grp); in rtm_dump_nexthop_bucket_nh()
3629 res_table = rtnl_dereference(nhg->res_table); in rtm_dump_nexthop_bucket_nh()
3630 for (bucket_index = dd->ctx->bucket_index; in rtm_dump_nexthop_bucket_nh()
3631 bucket_index < res_table->num_nh_buckets; in rtm_dump_nexthop_bucket_nh()
3636 bucket = &res_table->nh_buckets[bucket_index]; in rtm_dump_nexthop_bucket_nh()
3637 nhge = rtnl_dereference(bucket->nh_entry); in rtm_dump_nexthop_bucket_nh()
3638 if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family)) in rtm_dump_nexthop_bucket_nh()
3641 if (dd->filter.res_bucket_nh_id && in rtm_dump_nexthop_bucket_nh()
3642 dd->filter.res_bucket_nh_id != nhge->nh->id) in rtm_dump_nexthop_bucket_nh()
3645 dd->ctx->bucket_index = bucket_index; in rtm_dump_nexthop_bucket_nh()
3648 cb->nlh->nlmsg_seq, NLM_F_MULTI, in rtm_dump_nexthop_bucket_nh()
3649 cb->extack); in rtm_dump_nexthop_bucket_nh()
3654 dd->ctx->bucket_index = 0; in rtm_dump_nexthop_bucket_nh()
3666 if (!nh->is_group) in rtm_dump_nexthop_bucket_cb()
3669 nhg = rtnl_dereference(nh->nh_grp); in rtm_dump_nexthop_bucket_cb()
3670 if (!nhg->resilient) in rtm_dump_nexthop_bucket_cb()
3682 struct net *net = sock_net(skb->sk); in rtm_dump_nexthop_bucket()
3686 err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb); in rtm_dump_nexthop_bucket()
3692 cb->extack); in rtm_dump_nexthop_bucket()
3697 struct rb_root *root = &net->nexthop.rb_root; in rtm_dump_nexthop_bucket()
3699 err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh, in rtm_dump_nexthop_bucket()
3703 cb->seq = net->nexthop.seq; in rtm_dump_nexthop_bucket()
3715 err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1, in nh_valid_get_bucket_req_res_bucket()
3722 return -EINVAL; in nh_valid_get_bucket_req_res_bucket()
3737 ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1, in nh_valid_get_bucket_req()
3748 return -EINVAL; in nh_valid_get_bucket_req()
3763 struct net *net = sock_net(in_skb->sk); in rtm_get_nexthop_bucket()
3780 nhg = rtnl_dereference(nh->nh_grp); in rtm_get_nexthop_bucket()
3781 res_table = rtnl_dereference(nhg->res_table); in rtm_get_nexthop_bucket()
3782 if (bucket_index >= res_table->num_nh_buckets) { in rtm_get_nexthop_bucket()
3784 return -ENOENT; in rtm_get_nexthop_bucket()
3789 return -ENOBUFS; in rtm_get_nexthop_bucket()
3791 err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index], in rtm_get_nexthop_bucket()
3793 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, in rtm_get_nexthop_bucket()
3796 WARN_ON(err == -EMSGSIZE); in rtm_get_nexthop_bucket()
3809 unsigned int hash = nh_dev_hashfn(dev->ifindex); in nexthop_sync_mtu()
3811 struct hlist_head *head = &net->nexthop.devhash[hash]; in nexthop_sync_mtu()
3816 if (nhi->fib_nhc.nhc_dev == dev) { in nexthop_sync_mtu()
3817 if (nhi->family == AF_INET) in nexthop_sync_mtu()
3818 fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu, in nexthop_sync_mtu()
3842 nexthop_sync_mtu(dev, info_ext->ext.mtu); in nh_netdev_event()
3857 struct rb_root *root = &net->nexthop.rb_root; in nexthops_dump()
3882 err = blocking_notifier_chain_register(&net->nexthop.notifier_chain, in register_nexthop_notifier()
3894 err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain, in __unregister_nexthop_notifier()
3923 nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP); in nexthop_set_hw_flags()
3925 nexthop->nh_flags |= RTNH_F_OFFLOAD; in nexthop_set_hw_flags()
3927 nexthop->nh_flags |= RTNH_F_TRAP; in nexthop_set_hw_flags()
3945 if (!nexthop || !nexthop->is_group) in nexthop_bucket_set_hw_flags()
3948 nhg = rcu_dereference(nexthop->nh_grp); in nexthop_bucket_set_hw_flags()
3949 if (!nhg->resilient) in nexthop_bucket_set_hw_flags()
3952 if (bucket_index >= nhg->res_table->num_nh_buckets) in nexthop_bucket_set_hw_flags()
3955 res_table = rcu_dereference(nhg->res_table); in nexthop_bucket_set_hw_flags()
3956 bucket = &res_table->nh_buckets[bucket_index]; in nexthop_bucket_set_hw_flags()
3957 bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP); in nexthop_bucket_set_hw_flags()
3959 bucket->nh_flags |= RTNH_F_OFFLOAD; in nexthop_bucket_set_hw_flags()
3961 bucket->nh_flags |= RTNH_F_TRAP; in nexthop_bucket_set_hw_flags()
3979 if (!nexthop || !nexthop->is_group) in nexthop_res_grp_activity_update()
3982 nhg = rcu_dereference(nexthop->nh_grp); in nexthop_res_grp_activity_update()
3983 if (!nhg->resilient) in nexthop_res_grp_activity_update()
3989 res_table = rcu_dereference(nhg->res_table); in nexthop_res_grp_activity_update()
3990 if (num_buckets != res_table->num_nh_buckets) in nexthop_res_grp_activity_update()
3995 nh_res_bucket_set_busy(&res_table->nh_buckets[i]); in nexthop_res_grp_activity_update()
4015 kfree(net->nexthop.devhash); in nexthop_net_exit()
4016 net->nexthop.devhash = NULL; in nexthop_net_exit()
4023 net->nexthop.rb_root = RB_ROOT; in nexthop_net_init()
4024 net->nexthop.devhash = kzalloc(sz, GFP_KERNEL); in nexthop_net_init()
4025 if (!net->nexthop.devhash) in nexthop_net_init()
4026 return -ENOMEM; in nexthop_net_init()
4027 BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain); in nexthop_net_init()