// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS |		\
			       NHA_OP_FLAG_DUMP_HW_STATS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_policy_del[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};

static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->id = nhi->nh_parent->id;
	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}

static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;
	info->nh_grp->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;
	info->nh_res_table->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}

static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work (DW) that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL-locked code are writers, and need to
 * maintain mutual exclusion. Since there are only these two well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))
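
/* A minimal sketch of the RTNL-side write sequence described above, using
 * helpers defined later in this file (replace_nexthop_grp_res() follows
 * exactly this pattern):
 *
 *	nh_res_table_cancel_upkeep(res_table);	// synchronously cancel DW
 *	// ... migrate buckets, rebalance ...
 *	nh_res_table_upkeep(res_table, true, false);	// reschedules DW
 */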

static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nh->id,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}
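
/* Example: ifindex 0x12345 folds byte-wise into the 8-bit table index
 * (0x45 ^ 0x23 ^ 0x01) & 0xff = 0x67.
 */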

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		free_percpu(nhge->stats);
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);
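
/* The last reference is dropped via nexthop_put() (include/net/nexthop.h),
 * which queues this callback with call_rcu(&nh->rcu, nexthop_free_rcu), so
 * RCU readers never observe a freed nexthop.
 */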

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);
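
/* Typical lookup pattern for callers holding only the RCU read lock
 * (a sketch; no reference on the nexthop is taken):
 *
 *	rcu_read_lock();
 *	nh = nexthop_find_by_id(net, id);
 *	if (nh)
 *		...use nh within the read-side critical section only...
 *	rcu_read_unlock();
 */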

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}
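
/* 0 is never handed out as an ID; it serves as the "no unused id
 * available" sentinel once the search wraps back to its starting point.
 */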

static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *cpu_stats;

	cpu_stats = get_cpu_ptr(nhge->stats);
	u64_stats_update_begin(&cpu_stats->syncp);
	u64_stats_inc(&cpu_stats->packets);
	u64_stats_update_end(&cpu_stats->syncp);
	put_cpu_ptr(cpu_stats);
}
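
/* The counters are per-CPU, so this forwarding fast path only touches
 * local-CPU state; u64_stats_update_begin()/end() compile away on 64-bit.
 */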

static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
				    u64 *ret_packets)
{
	int i;

	*ret_packets = 0;

	for_each_possible_cpu(i) {
		struct nh_grp_entry_stats *cpu_stats;
		unsigned int start;
		u64 packets;

		cpu_stats = per_cpu_ptr(nhge->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			packets = u64_stats_read(&cpu_stats->packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		*ret_packets += packets;
	}
}
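
/* On 32-bit kernels the fetch_begin/fetch_retry pair re-reads a CPU's
 * counter if a writer raced with us; on 64-bit the loop runs exactly once.
 */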

static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
					 const struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	info->id = nh->id;
	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
	info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
						    stats, nhg->num_nh),
					GFP_KERNEL);
	if (!info->nh_grp_hw_stats)
		return -ENOMEM;

	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
	}

	return 0;
}

static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_grp_hw_stats);
}

void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
				  unsigned int nh_idx,
				  u64 delta_packets)
{
	info->hw_stats_used = true;
	info->stats[nh_idx].packets += delta_packets;
}
EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);

static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
					 struct nh_notifier_info *info)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
	}
}

static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
{
	struct nh_notifier_info info = {
		.net = nh->net,
	};
	struct net *net = nh->net;
	int err;

	if (nexthop_notifiers_is_empty(net)) {
		*hw_stats_used = false;
		return 0;
	}

	err = nh_notifier_grp_hw_stats_init(&info, nh);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
					   &info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	nh_grp_hw_stats_apply_update(nh, &info);
	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;

	nh_notifier_grp_hw_stats_fini(&info);
	return notifier_to_errno(err);
}

static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
					struct nh_grp_entry *nhge,
					u32 op_flags)
{
	struct nlattr *nest;
	u64 packets;

	nh_grp_entry_stats_read(nhge, &packets);

	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
			 packets + nhge->packets_hw))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
			 nhge->packets_hw))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
				  u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nlattr *nest;
	bool hw_stats_used;
	int err;
	int i;

	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
		goto err_out;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nhg->hw_stats) {
		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
		if (err)
			goto out;

		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
			goto err_out;
	}

	nest = nla_nest_start(skb, NHA_GROUP_STATS);
	if (!nest)
		goto err_out;

	for (i = 0; i < nhg->num_nh; i++)
		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
						 op_flags))
			goto cancel_out;

	nla_nest_end(skb, nest);
	return 0;

cancel_out:
	nla_nest_cancel(skb, nest);
err_out:
	err = -EMSGSIZE;
out:
	return err;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
			    u32 op_flags, u32 *resp_op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	u16 weight;
	int i;

	*resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		weight = nhg->nh_entries[i].weight - 1;

		*p++ = (struct nexthop_grp) {
			.id = nhg->nh_entries[i].nh->id,
			.weight = weight,
			.weight_high = weight >> 8,
		};
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
	     nla_put_nh_group_stats(skb, nh, op_flags)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags,
			u32 op_flags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
		u32 resp_op_flags = 0;

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
		    nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh) +
		      nla_total_size(4) +	/* NHA_OP_FLAGS */
		      0;
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}

static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int i, j;
	u8 nhg_fdb = 0;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved field in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nexthop_grp_weight(&nhg[i]) == 0) {
			/* 0xffff got passed in, representing weight of 0x10000,
			 * which is too heavy.
			 */
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	if (tb[NHA_FDB])
		nhg_fdb = 1;
	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		switch (i) {
		case NHA_HW_STATS_ENABLE:
		case NHA_FDB:
			continue;
		case NHA_RES_GROUP:
			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
				continue;
			break;
		}
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}

static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nh_grp_entry *nhge0 = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* Nexthop groups always check whether each nexthop is good
		 * before using it; they do not rely on a sysctl for this
		 * behavior.
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!nhge0)
			nhge0 = nhge;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	if (!nhge0)
		nhge0 = &nhg->nh_entries[0];
	nh_grp_entry_stats_inc(nhge0);
	return nhge0->nh;
}

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	nh_grp_entry_stats_inc(nhge);
	return nhge->nh;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);
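
/* Callers pass a flow hash (e.g. fib_multipath_hash() on the IPv4 input
 * path), so a given flow keeps mapping to the same path for as long as the
 * group is stable.
 */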

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}

static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}

static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}

static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * the idle timer or the unbalanced timer expired.
	 */

	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
	if (time_after_eq(now, idle_point)) {
		/* The bucket is idle. We _can_ migrate: unset force. */
		*force = false;
		return true;
	}

	/* Unbalanced timer of 0 means "never force". */
	if (res_table->unbalanced_timer) {
		unsigned long unb_point;

		unb_point = nh_res_table_unb_point(res_table);
		if (time_after(now, unb_point)) {
			/* The bucket is not idle, but the unbalanced timer
			 * expired. We _can_ migrate, but set force anyway,
			 * so that drivers know to ignore activity reports
			 * from the HW.
			 */
			*force = true;
			return true;
		}

		nh_res_time_set_deadline(unb_point, deadline);
	}

	nh_res_time_set_deadline(idle_point, deadline);
	return false;
}

static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
				  u16 bucket_index, bool notify,
				  bool notify_nl, bool force)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *new_nhge;
	struct netlink_ext_ack extack;
	int err;

	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
					    struct nh_grp_entry,
					    res.uw_nh_entry);
	if (WARN_ON_ONCE(!new_nhge))
		/* If this function is called, "bucket" is either not
		 * occupied, or it belongs to a next hop that is
		 * overweight. In either case, there ought to be a
		 * corresponding underweight next hop.
		 */
		return false;

	if (notify) {
		struct nh_grp_entry *old_nhge;

		old_nhge = nh_res_dereference(bucket->nh_entry);
		err = call_nexthop_res_bucket_notifiers(res_table->net,
							res_table->nhg_id,
							bucket_index, force,
							old_nhge->nh,
							new_nhge->nh, &extack);
		if (err) {
			pr_err_ratelimited("%s\n", extack._msg);
			if (!force)
				return false;
			/* It is not possible to veto a forced replacement, so
			 * just clear the hardware flags from the nexthop
			 * bucket to indicate to user space that this bucket is
			 * not correctly populated in hardware.
			 */
			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
		}
	}

	nh_res_bucket_set_nh(bucket, new_nhge);
	nh_res_bucket_set_idle(res_table, bucket);

	if (notify_nl)
		nexthop_bucket_notify(res_table, bucket_index);

	if (nh_res_nhge_is_balanced(new_nhge))
		list_del(&new_nhge->res.uw_nh_entry);
	return true;
}

#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)

static void nh_res_table_upkeep(struct nh_res_table *res_table,
				bool notify, bool notify_nl)
{
	unsigned long now = jiffies;
	unsigned long deadline;
	u16 i;

	/* Deadline is the next time that upkeep should be run. It is the
	 * earliest time at which one of the buckets might be migrated.
	 * Start at the most pessimistic estimate: either unbalanced_timer
	 * from now, or if there is none, idle_timer from now. For each
	 * encountered time point, call nh_res_time_set_deadline() to
	 * refine the estimate.
	 */
	if (res_table->unbalanced_timer)
		deadline = now + res_table->unbalanced_timer;
	else
		deadline = now + res_table->idle_timer;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		bool force;

		if (nh_res_bucket_should_migrate(res_table, bucket,
						 &deadline, &force)) {
			if (!nh_res_bucket_migrate(res_table, i, notify,
						   notify_nl, force)) {
				unsigned long idle_point;

				/* A driver can override the migration
				 * decision if the HW reports that the
				 * bucket is actually not idle. Therefore
				 * remark the bucket as busy again and
				 * update the deadline.
				 */
				nh_res_bucket_set_busy(bucket);
				idle_point = nh_res_bucket_idle_point(res_table,
								      bucket,
								      now);
				nh_res_time_set_deadline(idle_point, &deadline);
			}
		}
	}

	/* If the group is still unbalanced, schedule the next upkeep to
	 * either the deadline computed above, or the minimum deadline,
	 * whichever comes later.
	 */
	if (!nh_res_table_is_balanced(res_table)) {
		unsigned long now = jiffies;
		unsigned long min_deadline;

		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
		if (time_before(deadline, min_deadline))
			deadline = min_deadline;

		queue_delayed_work(system_power_efficient_wq,
				   &res_table->upkeep_dw, deadline - now);
	}
}

static void nh_res_table_upkeep_dw(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct nh_res_table *res_table;

	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
	nh_res_table_upkeep(res_table, true, true);
}

static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
{
	cancel_delayed_work_sync(&res_table->upkeep_dw);
}

static void nh_res_group_rebalance(struct nh_group *nhg,
				   struct nh_res_table *res_table)
{
	u16 prev_upper_bound = 0;
	u32 total = 0;
	u32 w = 0;
	int i;

	INIT_LIST_HEAD(&res_table->uw_nh_entries);

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		u16 upper_bound;
		u64 btw;

		w += nhge->weight;
		btw = ((u64)res_table->num_nh_buckets) * w;
		upper_bound = DIV_ROUND_CLOSEST_ULL(btw, total);
		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
		prev_upper_bound = upper_bound;

		if (nh_res_nhge_is_uw(nhge)) {
			if (list_empty(&res_table->uw_nh_entries))
				res_table->unbalanced_since = jiffies;
			list_add(&nhge->res.uw_nh_entry,
				 &res_table->uw_nh_entries);
		}
	}
}
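
/* Worked example: with 8 buckets and two nexthops weighted 1 and 3, the
 * running upper bounds are round(8 * 1 / 4) = 2 and round(8 * 4 / 4) = 8,
 * so the entries want 2 and 6 buckets respectively.
 */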
1925 
1926 /* Migrate buckets in res_table so that they reference NHGE's from NHG with
1927  * the right NH ID. Set those buckets that do not have a corresponding NHGE
1928  * entry in NHG as not occupied.
1929  */
1930 static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1931 					 struct nh_group *nhg)
1932 {
1933 	u16 i;
1934 
1935 	for (i = 0; i < res_table->num_nh_buckets; i++) {
1936 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1937 		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1938 		bool found = false;
1939 		int j;
1940 
1941 		for (j = 0; j < nhg->num_nh; j++) {
1942 			struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1943 
1944 			if (nhge->nh->id == id) {
1945 				nh_res_bucket_set_nh(bucket, nhge);
1946 				found = true;
1947 				break;
1948 			}
1949 		}
1950 
1951 		if (!found)
1952 			nh_res_bucket_unset_nh(bucket);
1953 	}
1954 }
1955 
1956 static void replace_nexthop_grp_res(struct nh_group *oldg,
1957 				    struct nh_group *newg)
1958 {
1959 	/* For NH group replacement, the new NHG might only have a stub
1960 	 * hash table with 0 buckets, because the number of buckets was not
1961 	 * specified. For NH removal, oldg and newg both reference the same
1962 	 * res_table. So in any case, in the following, we want to work
1963 	 * with oldg->res_table.
1964 	 */
1965 	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1966 	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1967 	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1968 
1969 	nh_res_table_cancel_upkeep(old_res_table);
1970 	nh_res_table_migrate_buckets(old_res_table, newg);
1971 	nh_res_group_rebalance(newg, old_res_table);
1972 	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1973 		old_res_table->unbalanced_since = prev_unbalanced_since;
1974 	nh_res_table_upkeep(old_res_table, true, false);
1975 }
1976 
1977 static void nh_hthr_group_rebalance(struct nh_group *nhg)
1978 {
1979 	u32 total = 0;
1980 	u32 w = 0;
1981 	int i;
1982 
1983 	for (i = 0; i < nhg->num_nh; ++i)
1984 		total += nhg->nh_entries[i].weight;
1985 
1986 	for (i = 0; i < nhg->num_nh; ++i) {
1987 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1988 		u32 upper_bound;
1989 
1990 		w += nhge->weight;
1991 		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
1992 		atomic_set(&nhge->hthr.upper_bound, upper_bound);
1993 	}
1994 }
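
/* Editor's note: a hedged userspace sketch of how the 31-bit upper
 * bounds computed above drive selection (cf. nexthop_select_path_hthr()
 * earlier in this file): a flow uses the first entry whose upper bound
 * is >= its hash. Names below are local to the example.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t weights[] = { 1, 3 };
	uint32_t bound[2], total = 4, w = 0;
	uint32_t hash = 0x32000000;	/* an example 31-bit flow hash */
	int i;

	for (i = 0; i < 2; i++) {
		uint64_t n;

		w += weights[i];
		/* DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1 */
		n = (uint64_t)w << 31;
		bound[i] = (uint32_t)((n + total / 2) / total) - 1;
	}

	for (i = 0; i < 2; i++) {
		if (hash <= bound[i]) {
			printf("hash %#x -> entry %d (bound %#x)\n",
			       hash, i, bound[i]);
			break;
		}
	}
	return 0;	/* entry 1: 0x32000000 > 0x1fffffff, <= 0x7fffffff */
}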
1995 
1996 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
1997 				struct nl_info *nlinfo)
1998 {
1999 	struct nh_grp_entry *nhges, *new_nhges;
2000 	struct nexthop *nhp = nhge->nh_parent;
2001 	struct netlink_ext_ack extack = {};
2002 	struct nexthop *nh = nhge->nh;
2003 	struct nh_group *nhg, *newg;
2004 	int i, j, err;
2005 
2006 	WARN_ON(!nh);
2007 
2008 	nhg = rtnl_dereference(nhp->nh_grp);
2009 	newg = nhg->spare;
2010 
2011 	/* last entry: keep the member nexthop visible, remove the parent group */
2012 	if (nhg->num_nh == 1) {
2013 		remove_nexthop(net, nhp, nlinfo);
2014 		return;
2015 	}
2016 
2017 	newg->has_v4 = false;
2018 	newg->is_multipath = nhg->is_multipath;
2019 	newg->hash_threshold = nhg->hash_threshold;
2020 	newg->resilient = nhg->resilient;
2021 	newg->fdb_nh = nhg->fdb_nh;
2022 	newg->num_nh = nhg->num_nh;
2023 
2024 	/* copy old entries to new except the one getting removed */
2025 	nhges = nhg->nh_entries;
2026 	new_nhges = newg->nh_entries;
2027 	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
2028 		struct nh_info *nhi;
2029 
2030 		/* current nexthop getting removed */
2031 		if (nhg->nh_entries[i].nh == nh) {
2032 			newg->num_nh--;
2033 			continue;
2034 		}
2035 
2036 		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2037 		if (nhi->family == AF_INET)
2038 			newg->has_v4 = true;
2039 
2040 		list_del(&nhges[i].nh_list);
2041 		new_nhges[j].stats = nhges[i].stats;
2042 		new_nhges[j].nh_parent = nhges[i].nh_parent;
2043 		new_nhges[j].nh = nhges[i].nh;
2044 		new_nhges[j].weight = nhges[i].weight;
2045 		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
2046 		j++;
2047 	}
2048 
2049 	if (newg->hash_threshold)
2050 		nh_hthr_group_rebalance(newg);
2051 	else if (newg->resilient)
2052 		replace_nexthop_grp_res(nhg, newg);
2053 
2054 	rcu_assign_pointer(nhp->nh_grp, newg);
2055 
2056 	list_del(&nhge->nh_list);
2057 	free_percpu(nhge->stats);
2058 	nexthop_put(nhge->nh);
2059 
2060 	/* Removal of a NH from a resilient group is notified through
2061 	 * bucket notifications.
2062 	 */
2063 	if (newg->hash_threshold) {
2064 		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
2065 					     &extack);
2066 		if (err)
2067 			pr_err("%s\n", extack._msg);
2068 	}
2069 
2070 	if (nlinfo)
2071 		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
2072 }
2073 
2074 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
2075 				       struct nl_info *nlinfo)
2076 {
2077 	struct nh_grp_entry *nhge, *tmp;
2078 
2079 	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
2080 		remove_nh_grp_entry(net, nhge, nlinfo);
2081 
2082 	/* make sure all see the newly published array before releasing rtnl */
2083 	synchronize_net();
2084 }
2085 
2086 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
2087 {
2088 	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
2089 	struct nh_res_table *res_table;
2090 	int i, num_nh = nhg->num_nh;
2091 
2092 	for (i = 0; i < num_nh; ++i) {
2093 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2094 
2095 		if (WARN_ON(!nhge->nh))
2096 			continue;
2097 
2098 		list_del_init(&nhge->nh_list);
2099 	}
2100 
2101 	if (nhg->resilient) {
2102 		res_table = rtnl_dereference(nhg->res_table);
2103 		nh_res_table_cancel_upkeep(res_table);
2104 	}
2105 }
2106 
2107 /* not called for nexthop replace */
2108 static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
2109 {
2110 	struct fib6_info *f6i, *tmp;
2111 	bool do_flush = false;
2112 	struct fib_info *fi;
2113 
2114 	list_for_each_entry(fi, &nh->fi_list, nh_list) {
2115 		fi->fib_flags |= RTNH_F_DEAD;
2116 		do_flush = true;
2117 	}
2118 	if (do_flush)
2119 		fib_flush(net);
2120 
2121 	/* ip6_del_rt removes the entry from this list hence the _safe */
2122 	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
2123 		/* __ip6_del_rt does a release, so do a hold here */
2124 		fib6_info_hold(f6i);
2125 		ipv6_stub->ip6_del_rt(net, f6i,
2126 				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
2127 	}
2128 }
2129 
2130 static void __remove_nexthop(struct net *net, struct nexthop *nh,
2131 			     struct nl_info *nlinfo)
2132 {
2133 	__remove_nexthop_fib(net, nh);
2134 
2135 	if (nh->is_group) {
2136 		remove_nexthop_group(nh, nlinfo);
2137 	} else {
2138 		struct nh_info *nhi;
2139 
2140 		nhi = rtnl_dereference(nh->nh_info);
2141 		if (nhi->fib_nhc.nhc_dev)
2142 			hlist_del(&nhi->dev_hash);
2143 
2144 		remove_nexthop_from_groups(net, nh, nlinfo);
2145 	}
2146 }
2147 
2148 static void remove_nexthop(struct net *net, struct nexthop *nh,
2149 			   struct nl_info *nlinfo)
2150 {
2151 	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
2152 
2153 	/* remove from the tree */
2154 	rb_erase(&nh->rb_node, &net->nexthop.rb_root);
2155 
2156 	if (nlinfo)
2157 		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
2158 
2159 	__remove_nexthop(net, nh, nlinfo);
2160 	nh_base_seq_inc(net);
2161 
2162 	nexthop_put(nh);
2163 }
2164 
2165 /* if any FIB entries reference this nexthop, their cached dst
2166  * entries need to be regenerated
2167  */
2168 static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
2169 			      struct nexthop *replaced_nh)
2170 {
2171 	struct fib6_info *f6i;
2172 	struct nh_group *nhg;
2173 	int i;
2174 
2175 	if (!list_empty(&nh->fi_list))
2176 		rt_cache_flush(net);
2177 
2178 	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2179 		ipv6_stub->fib6_update_sernum(net, f6i);
2180 
2181 	/* if an IPv6 group was replaced, we have to release all old
2182 	 * dsts to make sure all refcounts are released
2183 	 */
2184 	if (!replaced_nh->is_group)
2185 		return;
2186 
2187 	nhg = rtnl_dereference(replaced_nh->nh_grp);
2188 	for (i = 0; i < nhg->num_nh; i++) {
2189 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2190 		struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
2191 
2192 		if (nhi->family == AF_INET6)
2193 			ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
2194 	}
2195 }
2196 
2197 static int replace_nexthop_grp(struct net *net, struct nexthop *old,
2198 			       struct nexthop *new, const struct nh_config *cfg,
2199 			       struct netlink_ext_ack *extack)
2200 {
2201 	struct nh_res_table *tmp_table = NULL;
2202 	struct nh_res_table *new_res_table;
2203 	struct nh_res_table *old_res_table;
2204 	struct nh_group *oldg, *newg;
2205 	int i, err;
2206 
2207 	if (!new->is_group) {
2208 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
2209 		return -EINVAL;
2210 	}
2211 
2212 	oldg = rtnl_dereference(old->nh_grp);
2213 	newg = rtnl_dereference(new->nh_grp);
2214 
2215 	if (newg->hash_threshold != oldg->hash_threshold) {
2216 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
2217 		return -EINVAL;
2218 	}
2219 
2220 	if (newg->hash_threshold) {
2221 		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
2222 					     extack);
2223 		if (err)
2224 			return err;
2225 	} else if (newg->resilient) {
2226 		new_res_table = rtnl_dereference(newg->res_table);
2227 		old_res_table = rtnl_dereference(oldg->res_table);
2228 
2229 		/* Accept if num_nh_buckets was not given, but if it was
2230 		 * given, demand that the value be correct.
2231 		 */
2232 		if (cfg->nh_grp_res_has_num_buckets &&
2233 		    cfg->nh_grp_res_num_buckets !=
2234 		    old_res_table->num_nh_buckets) {
2235 			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
2236 			return -EINVAL;
2237 		}
2238 
2239 		/* Emit a pre-replace notification so that listeners can veto
2240 		 * a potentially unsupported configuration. Otherwise,
2241 		 * individual bucket replacement notifications would need to be
2242 		 * vetoed, which is something that should only happen if the
2243 		 * bucket is currently active.
2244 		 */
2245 		err = call_nexthop_res_table_notifiers(net, new, extack);
2246 		if (err)
2247 			return err;
2248 
2249 		if (cfg->nh_grp_res_has_idle_timer)
2250 			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
2251 		if (cfg->nh_grp_res_has_unbalanced_timer)
2252 			old_res_table->unbalanced_timer =
2253 				cfg->nh_grp_res_unbalanced_timer;
2254 
2255 		replace_nexthop_grp_res(oldg, newg);
2256 
2257 		tmp_table = new_res_table;
2258 		rcu_assign_pointer(newg->res_table, old_res_table);
2259 		rcu_assign_pointer(newg->spare->res_table, old_res_table);
2260 	}
2261 
2262 	/* update parents - used by nexthop code for cleanup */
2263 	for (i = 0; i < newg->num_nh; i++)
2264 		newg->nh_entries[i].nh_parent = old;
2265 
2266 	rcu_assign_pointer(old->nh_grp, newg);
2267 
2268 	/* Make sure concurrent readers are not using 'oldg' anymore. */
2269 	synchronize_net();
2270 
2271 	if (newg->resilient) {
2272 		rcu_assign_pointer(oldg->res_table, tmp_table);
2273 		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
2274 	}
2275 
2276 	for (i = 0; i < oldg->num_nh; i++)
2277 		oldg->nh_entries[i].nh_parent = new;
2278 
2279 	rcu_assign_pointer(new->nh_grp, oldg);
2280 
2281 	return 0;
2282 }
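
/* Editor's note on the swap above: after replacement, 'old' (which
 * stays in the rb-tree) carries newg, while 'new' (freed by the
 * caller) carries oldg. For resilient groups the live res_table is
 * first handed to newg, and oldg only receives the stub table
 * (tmp_table) after synchronize_net() guarantees no reader still sees
 * oldg, so tearing down 'new' frees the stub rather than the live
 * table.
 */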
2283 
2284 static void nh_group_v4_update(struct nh_group *nhg)
2285 {
2286 	struct nh_grp_entry *nhges;
2287 	bool has_v4 = false;
2288 	int i;
2289 
2290 	nhges = nhg->nh_entries;
2291 	for (i = 0; i < nhg->num_nh; i++) {
2292 		struct nh_info *nhi;
2293 
2294 		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2295 		if (nhi->family == AF_INET)
2296 			has_v4 = true;
2297 	}
2298 	nhg->has_v4 = has_v4;
2299 }
2300 
2301 static int replace_nexthop_single_notify_res(struct net *net,
2302 					     struct nh_res_table *res_table,
2303 					     struct nexthop *old,
2304 					     struct nh_info *oldi,
2305 					     struct nh_info *newi,
2306 					     struct netlink_ext_ack *extack)
2307 {
2308 	u32 nhg_id = res_table->nhg_id;
2309 	int err;
2310 	u16 i;
2311 
2312 	for (i = 0; i < res_table->num_nh_buckets; i++) {
2313 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2314 		struct nh_grp_entry *nhge;
2315 
2316 		nhge = rtnl_dereference(bucket->nh_entry);
2317 		if (nhge->nh == old) {
2318 			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
2319 								  i, true,
2320 								  oldi, newi,
2321 								  extack);
2322 			if (err)
2323 				goto err_notify;
2324 		}
2325 	}
2326 
2327 	return 0;
2328 
2329 err_notify:
2330 	while (i-- > 0) {
2331 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2332 		struct nh_grp_entry *nhge;
2333 
2334 		nhge = rtnl_dereference(bucket->nh_entry);
2335 		if (nhge->nh == old)
2336 			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
2337 							    true, newi, oldi,
2338 							    extack);
2339 	}
2340 	return err;
2341 }
2342 
2343 static int replace_nexthop_single_notify(struct net *net,
2344 					 struct nexthop *group_nh,
2345 					 struct nexthop *old,
2346 					 struct nh_info *oldi,
2347 					 struct nh_info *newi,
2348 					 struct netlink_ext_ack *extack)
2349 {
2350 	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
2351 	struct nh_res_table *res_table;
2352 
2353 	if (nhg->hash_threshold) {
2354 		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
2355 					      group_nh, extack);
2356 	} else if (nhg->resilient) {
2357 		res_table = rtnl_dereference(nhg->res_table);
2358 		return replace_nexthop_single_notify_res(net, res_table,
2359 							 old, oldi, newi,
2360 							 extack);
2361 	}
2362 
2363 	return -EINVAL;
2364 }
2365 
2366 static int replace_nexthop_single(struct net *net, struct nexthop *old,
2367 				  struct nexthop *new,
2368 				  struct netlink_ext_ack *extack)
2369 {
2370 	u8 old_protocol, old_nh_flags;
2371 	struct nh_info *oldi, *newi;
2372 	struct nh_grp_entry *nhge;
2373 	int err;
2374 
2375 	if (new->is_group) {
2376 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
2377 		return -EINVAL;
2378 	}
2379 
2380 	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
2381 	if (err)
2382 		return err;
2383 
2384 	/* Hardware flags were set on 'old' as 'new' is not in the red-black
2385 	 * tree. Therefore, inherit the flags from 'old' to 'new'.
2386 	 */
2387 	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
2388 
2389 	oldi = rtnl_dereference(old->nh_info);
2390 	newi = rtnl_dereference(new->nh_info);
2391 
2392 	newi->nh_parent = old;
2393 	oldi->nh_parent = new;
2394 
2395 	old_protocol = old->protocol;
2396 	old_nh_flags = old->nh_flags;
2397 
2398 	old->protocol = new->protocol;
2399 	old->nh_flags = new->nh_flags;
2400 
2401 	rcu_assign_pointer(old->nh_info, newi);
2402 	rcu_assign_pointer(new->nh_info, oldi);
2403 
2404 	/* Send a replace notification for all the groups using the nexthop. */
2405 	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2406 		struct nexthop *nhp = nhge->nh_parent;
2407 
2408 		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
2409 						    extack);
2410 		if (err)
2411 			goto err_notify;
2412 	}
2413 
2414 	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
2415 	 * update IPv4 indication in all the groups using the nexthop.
2416 	 */
2417 	if (oldi->family == AF_INET && newi->family == AF_INET6) {
2418 		list_for_each_entry(nhge, &old->grp_list, nh_list) {
2419 			struct nexthop *nhp = nhge->nh_parent;
2420 			struct nh_group *nhg;
2421 
2422 			nhg = rtnl_dereference(nhp->nh_grp);
2423 			nh_group_v4_update(nhg);
2424 		}
2425 	}
2426 
2427 	return 0;
2428 
2429 err_notify:
2430 	rcu_assign_pointer(new->nh_info, newi);
2431 	rcu_assign_pointer(old->nh_info, oldi);
2432 	old->nh_flags = old_nh_flags;
2433 	old->protocol = old_protocol;
2434 	oldi->nh_parent = old;
2435 	newi->nh_parent = new;
2436 	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
2437 		struct nexthop *nhp = nhge->nh_parent;
2438 
2439 		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
2440 	}
2441 	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
2442 	return err;
2443 }
2444 
2445 static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
2446 				     struct nl_info *info)
2447 {
2448 	struct fib6_info *f6i;
2449 
2450 	if (!list_empty(&nh->fi_list)) {
2451 		struct fib_info *fi;
2452 
2453 		/* The expectation is a few fib_info per nexthop and
2454 		 * many routes per fib_info. So mark the fib_info
2455 		 * entries and then walk the fib tables once.
2456 		 */
2457 		list_for_each_entry(fi, &nh->fi_list, nh_list)
2458 			fi->nh_updated = true;
2459 
2460 		fib_info_notify_update(net, info);
2461 
2462 		list_for_each_entry(fi, &nh->fi_list, nh_list)
2463 			fi->nh_updated = false;
2464 	}
2465 
2466 	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2467 		ipv6_stub->fib6_rt_update(net, f6i, info);
2468 }
2469 
2470 /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2471  * linked to this nexthop and for all groups that the nexthop
2472  * is a member of
2473  */
2474 static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2475 				   struct nl_info *info)
2476 {
2477 	struct nh_grp_entry *nhge;
2478 
2479 	__nexthop_replace_notify(net, nh, info);
2480 
2481 	list_for_each_entry(nhge, &nh->grp_list, nh_list)
2482 		__nexthop_replace_notify(net, nhge->nh_parent, info);
2483 }
2484 
2485 static int replace_nexthop(struct net *net, struct nexthop *old,
2486 			   struct nexthop *new, const struct nh_config *cfg,
2487 			   struct netlink_ext_ack *extack)
2488 {
2489 	bool new_is_reject = false;
2490 	struct nh_grp_entry *nhge;
2491 	int err;
2492 
2493 	/* check that existing FIB entries are ok with the
2494 	 * new nexthop definition
2495 	 */
2496 	err = fib_check_nh_list(old, new, extack);
2497 	if (err)
2498 		return err;
2499 
2500 	err = fib6_check_nh_list(old, new, extack);
2501 	if (err)
2502 		return err;
2503 
2504 	if (!new->is_group) {
2505 		struct nh_info *nhi = rtnl_dereference(new->nh_info);
2506 
2507 		new_is_reject = nhi->reject_nh;
2508 	}
2509 
2510 	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2511 		/* if new nexthop is a blackhole, any groups using this
2512 		 * nexthop cannot have more than 1 path
2513 		 */
2514 		if (new_is_reject &&
2515 		    nexthop_num_path(nhge->nh_parent) > 1) {
2516 			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2517 			return -EINVAL;
2518 		}
2519 
2520 		err = fib_check_nh_list(nhge->nh_parent, new, extack);
2521 		if (err)
2522 			return err;
2523 
2524 		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2525 		if (err)
2526 			return err;
2527 	}
2528 
2529 	if (old->is_group)
2530 		err = replace_nexthop_grp(net, old, new, cfg, extack);
2531 	else
2532 		err = replace_nexthop_single(net, old, new, extack);
2533 
2534 	if (!err) {
2535 		nh_rt_cache_flush(net, old, new);
2536 
2537 		__remove_nexthop(net, new, NULL);
2538 		nexthop_put(new);
2539 	}
2540 
2541 	return err;
2542 }
2543 
2544 /* called with rtnl_lock held */
2545 static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2546 			  struct nh_config *cfg, struct netlink_ext_ack *extack)
2547 {
2548 	struct rb_node **pp, *parent = NULL, *next;
2549 	struct rb_root *root = &net->nexthop.rb_root;
2550 	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2551 	bool create = !!(cfg->nlflags & NLM_F_CREATE);
2552 	u32 new_id = new_nh->id;
2553 	int replace_notify = 0;
2554 	int rc = -EEXIST;
2555 
2556 	pp = &root->rb_node;
2557 	while (1) {
2558 		struct nexthop *nh;
2559 
2560 		next = *pp;
2561 		if (!next)
2562 			break;
2563 
2564 		parent = next;
2565 
2566 		nh = rb_entry(parent, struct nexthop, rb_node);
2567 		if (new_id < nh->id) {
2568 			pp = &next->rb_left;
2569 		} else if (new_id > nh->id) {
2570 			pp = &next->rb_right;
2571 		} else if (replace) {
2572 			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2573 			if (!rc) {
2574 				new_nh = nh; /* send notification with old nh */
2575 				replace_notify = 1;
2576 			}
2577 			goto out;
2578 		} else {
2579 			/* id already exists and not a replace */
2580 			goto out;
2581 		}
2582 	}
2583 
2584 	if (replace && !create) {
2585 		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2586 		rc = -ENOENT;
2587 		goto out;
2588 	}
2589 
2590 	if (new_nh->is_group) {
2591 		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2592 		struct nh_res_table *res_table;
2593 
2594 		if (nhg->resilient) {
2595 			res_table = rtnl_dereference(nhg->res_table);
2596 
2597 			/* Not passing the number of buckets is OK when
2598 			 * replacing, but not when creating a new group.
2599 			 */
2600 			if (!cfg->nh_grp_res_has_num_buckets) {
2601 				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2602 				rc = -EINVAL;
2603 				goto out;
2604 			}
2605 
2606 			nh_res_group_rebalance(nhg, res_table);
2607 
2608 			/* Do not send bucket notifications; we do a
2609 			 * full notification below.
2610 			 */
2611 			nh_res_table_upkeep(res_table, false, false);
2612 		}
2613 	}
2614 
2615 	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2616 	rb_insert_color(&new_nh->rb_node, root);
2617 
2618 	/* The initial insertion is a full notification for hash-threshold as
2619 	 * well as resilient groups.
2620 	 */
2621 	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2622 	if (rc)
2623 		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2624 
2625 out:
2626 	if (!rc) {
2627 		nh_base_seq_inc(net);
2628 		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2629 		if (replace_notify &&
2630 		    READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
2631 			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2632 	}
2633 
2634 	return rc;
2635 }
2636 
2637 /* rtnl */
2638 /* remove all nexthops tied to a device being deleted */
2639 static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2640 {
2641 	unsigned int hash = nh_dev_hashfn(dev->ifindex);
2642 	struct net *net = dev_net(dev);
2643 	struct hlist_head *head = &net->nexthop.devhash[hash];
2644 	struct hlist_node *n;
2645 	struct nh_info *nhi;
2646 
2647 	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2648 		if (nhi->fib_nhc.nhc_dev != dev)
2649 			continue;
2650 
2651 		if (nhi->reject_nh &&
2652 		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2653 			continue;
2654 
2655 		remove_nexthop(net, nhi->nh_parent, NULL);
2656 	}
2657 }
2658 
2659 /* rtnl; called when net namespace is deleted */
2660 static void flush_all_nexthops(struct net *net)
2661 {
2662 	struct rb_root *root = &net->nexthop.rb_root;
2663 	struct rb_node *node;
2664 	struct nexthop *nh;
2665 
2666 	while ((node = rb_first(root))) {
2667 		nh = rb_entry(node, struct nexthop, rb_node);
2668 		remove_nexthop(net, nh, NULL);
2669 		cond_resched();
2670 	}
2671 }
2672 
2673 static struct nexthop *nexthop_create_group(struct net *net,
2674 					    struct nh_config *cfg)
2675 {
2676 	struct nlattr *grps_attr = cfg->nh_grp;
2677 	struct nexthop_grp *entry = nla_data(grps_attr);
2678 	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2679 	struct nh_group *nhg;
2680 	struct nexthop *nh;
2681 	int err;
2682 	int i;
2683 
2684 	if (WARN_ON(!num_nh))
2685 		return ERR_PTR(-EINVAL);
2686 
2687 	nh = nexthop_alloc();
2688 	if (!nh)
2689 		return ERR_PTR(-ENOMEM);
2690 
2691 	nh->is_group = 1;
2692 
2693 	nhg = nexthop_grp_alloc(num_nh);
2694 	if (!nhg) {
2695 		kfree(nh);
2696 		return ERR_PTR(-ENOMEM);
2697 	}
2698 
2699 	/* spare group used for removals */
2700 	nhg->spare = nexthop_grp_alloc(num_nh);
2701 	if (!nhg->spare) {
2702 		kfree(nhg);
2703 		kfree(nh);
2704 		return ERR_PTR(-ENOMEM);
2705 	}
2706 	nhg->spare->spare = nhg;
2707 
2708 	for (i = 0; i < nhg->num_nh; ++i) {
2709 		struct nexthop *nhe;
2710 		struct nh_info *nhi;
2711 
2712 		nhe = nexthop_find_by_id(net, entry[i].id);
2713 		if (!nexthop_get(nhe)) {
2714 			err = -ENOENT;
2715 			goto out_no_nh;
2716 		}
2717 
2718 		nhi = rtnl_dereference(nhe->nh_info);
2719 		if (nhi->family == AF_INET)
2720 			nhg->has_v4 = true;
2721 
2722 		nhg->nh_entries[i].stats =
2723 			netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
2724 		if (!nhg->nh_entries[i].stats) {
2725 			err = -ENOMEM;
2726 			nexthop_put(nhe);
2727 			goto out_no_nh;
2728 		}
2729 		nhg->nh_entries[i].nh = nhe;
2730 		nhg->nh_entries[i].weight = nexthop_grp_weight(&entry[i]);
2731 
2732 		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2733 		nhg->nh_entries[i].nh_parent = nh;
2734 	}
2735 
2736 	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2737 		nhg->hash_threshold = 1;
2738 		nhg->is_multipath = true;
2739 	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2740 		struct nh_res_table *res_table;
2741 
2742 		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2743 		if (!res_table) {
2744 			err = -ENOMEM;
2745 			goto out_no_nh;
2746 		}
2747 
2748 		rcu_assign_pointer(nhg->spare->res_table, res_table);
2749 		rcu_assign_pointer(nhg->res_table, res_table);
2750 		nhg->resilient = true;
2751 		nhg->is_multipath = true;
2752 	}
2753 
2754 	WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);
2755 
2756 	if (nhg->hash_threshold)
2757 		nh_hthr_group_rebalance(nhg);
2758 
2759 	if (cfg->nh_fdb)
2760 		nhg->fdb_nh = 1;
2761 
2762 	if (cfg->nh_hw_stats)
2763 		nhg->hw_stats = true;
2764 
2765 	rcu_assign_pointer(nh->nh_grp, nhg);
2766 
2767 	return nh;
2768 
2769 out_no_nh:
2770 	for (i--; i >= 0; --i) {
2771 		list_del(&nhg->nh_entries[i].nh_list);
2772 		free_percpu(nhg->nh_entries[i].stats);
2773 		nexthop_put(nhg->nh_entries[i].nh);
2774 	}
2775 
2776 	kfree(nhg->spare);
2777 	kfree(nhg);
2778 	kfree(nh);
2779 
2780 	return ERR_PTR(err);
2781 }
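
/* Editor's note: a minimal userspace sketch of the NHA_GROUP payload
 * consumed above: a flat array of struct nexthop_grp entries from the
 * uapi header. The on-wire weight is biased by one (0 encodes weight
 * 1), which nexthop_grp_weight() undoes. Netlink message construction
 * is omitted; this only illustrates the attribute's layout.
 */
#include <stdio.h>
#include <linux/nexthop.h>	/* struct nexthop_grp */

int main(void)
{
	/* two members: nexthop id 1 with weight 1, id 2 with weight 4 */
	struct nexthop_grp grp[2] = {
		{ .id = 1, .weight = 0 },
		{ .id = 2, .weight = 3 },
	};
	int i;

	for (i = 0; i < 2; i++)
		printf("id %u -> effective weight %u\n",
		       (unsigned int)grp[i].id,
		       (unsigned int)grp[i].weight + 1);
	return 0;
}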
2782 
2783 static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2784 			  struct nh_info *nhi, struct nh_config *cfg,
2785 			  struct netlink_ext_ack *extack)
2786 {
2787 	struct fib_nh *fib_nh = &nhi->fib_nh;
2788 	struct fib_config fib_cfg = {
2789 		.fc_oif   = cfg->nh_ifindex,
2790 		.fc_gw4   = cfg->gw.ipv4,
2791 		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2792 		.fc_flags = cfg->nh_flags,
2793 		.fc_nlinfo = cfg->nlinfo,
2794 		.fc_encap = cfg->nh_encap,
2795 		.fc_encap_type = cfg->nh_encap_type,
2796 	};
2797 	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2798 	int err;
2799 
2800 	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2801 	if (err) {
2802 		fib_nh_release(net, fib_nh);
2803 		goto out;
2804 	}
2805 
2806 	if (nhi->fdb_nh)
2807 		goto out;
2808 
2809 	/* sets nh_dev if successful */
2810 	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2811 	if (!err) {
2812 		nh->nh_flags = fib_nh->fib_nh_flags;
2813 		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2814 					  !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
2815 	} else {
2816 		fib_nh_release(net, fib_nh);
2817 	}
2818 out:
2819 	return err;
2820 }
2821 
2822 static int nh_create_ipv6(struct net *net, struct nexthop *nh,
2823 			  struct nh_info *nhi, struct nh_config *cfg,
2824 			  struct netlink_ext_ack *extack)
2825 {
2826 	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2827 	struct fib6_config fib6_cfg = {
2828 		.fc_table = l3mdev_fib_table(cfg->dev),
2829 		.fc_ifindex = cfg->nh_ifindex,
2830 		.fc_gateway = cfg->gw.ipv6,
2831 		.fc_flags = cfg->nh_flags,
2832 		.fc_nlinfo = cfg->nlinfo,
2833 		.fc_encap = cfg->nh_encap,
2834 		.fc_encap_type = cfg->nh_encap_type,
2835 		.fc_is_fdb = cfg->nh_fdb,
2836 	};
2837 	int err;
2838 
2839 	if (!ipv6_addr_any(&cfg->gw.ipv6))
2840 		fib6_cfg.fc_flags |= RTF_GATEWAY;
2841 
2842 	/* sets nh_dev if successful */
2843 	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2844 				      extack);
2845 	if (err) {
2846 		/* IPv6 is not enabled, don't call fib6_nh_release */
2847 		if (err == -EAFNOSUPPORT)
2848 			goto out;
2849 		ipv6_stub->fib6_nh_release(fib6_nh);
2850 	} else {
2851 		nh->nh_flags = fib6_nh->fib_nh_flags;
2852 	}
2853 out:
2854 	return err;
2855 }
2856 
2857 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2858 				      struct netlink_ext_ack *extack)
2859 {
2860 	struct nh_info *nhi;
2861 	struct nexthop *nh;
2862 	int err = 0;
2863 
2864 	nh = nexthop_alloc();
2865 	if (!nh)
2866 		return ERR_PTR(-ENOMEM);
2867 
2868 	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2869 	if (!nhi) {
2870 		kfree(nh);
2871 		return ERR_PTR(-ENOMEM);
2872 	}
2873 
2874 	nh->nh_flags = cfg->nh_flags;
2875 	nh->net = net;
2876 
2877 	nhi->nh_parent = nh;
2878 	nhi->family = cfg->nh_family;
2879 	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2880 
2881 	if (cfg->nh_fdb)
2882 		nhi->fdb_nh = 1;
2883 
2884 	if (cfg->nh_blackhole) {
2885 		nhi->reject_nh = 1;
2886 		cfg->nh_ifindex = net->loopback_dev->ifindex;
2887 	}
2888 
2889 	switch (cfg->nh_family) {
2890 	case AF_INET:
2891 		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2892 		break;
2893 	case AF_INET6:
2894 		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2895 		break;
2896 	}
2897 
2898 	if (err) {
2899 		kfree(nhi);
2900 		kfree(nh);
2901 		return ERR_PTR(err);
2902 	}
2903 
2904 	/* add the entry to the device based hash */
2905 	if (!nhi->fdb_nh)
2906 		nexthop_devhash_add(net, nhi);
2907 
2908 	rcu_assign_pointer(nh->nh_info, nhi);
2909 
2910 	return nh;
2911 }
2912 
2913 /* called with rtnl lock held */
2914 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2915 				   struct netlink_ext_ack *extack)
2916 {
2917 	struct nexthop *nh;
2918 	int err;
2919 
2920 	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
2921 		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
2922 		return ERR_PTR(-EINVAL);
2923 	}
2924 
2925 	if (!cfg->nh_id) {
2926 		cfg->nh_id = nh_find_unused_id(net);
2927 		if (!cfg->nh_id) {
2928 			NL_SET_ERR_MSG(extack, "No unused id");
2929 			return ERR_PTR(-EINVAL);
2930 		}
2931 	}
2932 
2933 	if (cfg->nh_grp)
2934 		nh = nexthop_create_group(net, cfg);
2935 	else
2936 		nh = nexthop_create(net, cfg, extack);
2937 
2938 	if (IS_ERR(nh))
2939 		return nh;
2940 
2941 	refcount_set(&nh->refcnt, 1);
2942 	nh->id = cfg->nh_id;
2943 	nh->protocol = cfg->nh_protocol;
2944 	nh->net = net;
2945 
2946 	err = insert_nexthop(net, nh, cfg, extack);
2947 	if (err) {
2948 		__remove_nexthop(net, nh, NULL);
2949 		nexthop_put(nh);
2950 		nh = ERR_PTR(err);
2951 	}
2952 
2953 	return nh;
2954 }
2955 
2956 static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
2957 			    unsigned long *timer_p, bool *has_p,
2958 			    struct netlink_ext_ack *extack)
2959 {
2960 	unsigned long timer;
2961 	u32 value;
2962 
2963 	if (!attr) {
2964 		*timer_p = fallback;
2965 		*has_p = false;
2966 		return 0;
2967 	}
2968 
2969 	value = nla_get_u32(attr);
2970 	timer = clock_t_to_jiffies(value);
2971 	if (timer == ~0UL) {
2972 		NL_SET_ERR_MSG(extack, "Timer value too large");
2973 		return -EINVAL;
2974 	}
2975 
2976 	*timer_p = timer;
2977 	*has_p = true;
2978 	return 0;
2979 }
2980 
2981 static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
2982 				    struct netlink_ext_ack *extack)
2983 {
2984 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
2985 	int err;
2986 
2987 	if (res) {
2988 		err = nla_parse_nested(tb,
2989 				       ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
2990 				       res, rtm_nh_res_policy_new, extack);
2991 		if (err < 0)
2992 			return err;
2993 	}
2994 
2995 	if (tb[NHA_RES_GROUP_BUCKETS]) {
2996 		cfg->nh_grp_res_num_buckets =
2997 			nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
2998 		cfg->nh_grp_res_has_num_buckets = true;
2999 		if (!cfg->nh_grp_res_num_buckets) {
3000 			NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
3001 			return -EINVAL;
3002 		}
3003 	}
3004 
3005 	err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
3006 			       NH_RES_DEFAULT_IDLE_TIMER,
3007 			       &cfg->nh_grp_res_idle_timer,
3008 			       &cfg->nh_grp_res_has_idle_timer,
3009 			       extack);
3010 	if (err)
3011 		return err;
3012 
3013 	return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
3014 				NH_RES_DEFAULT_UNBALANCED_TIMER,
3015 				&cfg->nh_grp_res_unbalanced_timer,
3016 				&cfg->nh_grp_res_has_unbalanced_timer,
3017 				extack);
3018 }
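
/* Editor's note: for reference, iproute2 exposes the attributes parsed
 * above roughly as follows (syntax per ip-nexthop(8); the values are
 * illustrative):
 *
 *	ip nexthop add id 10 group 1/2 type resilient \
 *		buckets 32 idle_timer 120 unbalanced_timer 300
 *
 * The timers arrive as clock_t ticks and are converted to jiffies by
 * rtm_nh_get_timer() above; the bucket count must be non-zero and, as
 * enforced in replace_nexthop_grp(), cannot be changed on replace.
 */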
3019 
3020 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
3021 			    struct nlmsghdr *nlh, struct nh_config *cfg,
3022 			    struct netlink_ext_ack *extack)
3023 {
3024 	struct nhmsg *nhm = nlmsg_data(nlh);
3025 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
3026 	int err;
3027 
3028 	err = nlmsg_parse(nlh, sizeof(*nhm), tb,
3029 			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
3030 			  rtm_nh_policy_new, extack);
3031 	if (err < 0)
3032 		return err;
3033 
3034 	err = -EINVAL;
3035 	if (nhm->resvd || nhm->nh_scope) {
3036 		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
3037 		goto out;
3038 	}
3039 	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
3040 		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
3041 		goto out;
3042 	}
3043 
3044 	switch (nhm->nh_family) {
3045 	case AF_INET:
3046 	case AF_INET6:
3047 		break;
3048 	case AF_UNSPEC:
3049 		if (tb[NHA_GROUP])
3050 			break;
3051 		fallthrough;
3052 	default:
3053 		NL_SET_ERR_MSG(extack, "Invalid address family");
3054 		goto out;
3055 	}
3056 
3057 	memset(cfg, 0, sizeof(*cfg));
3058 	cfg->nlflags = nlh->nlmsg_flags;
3059 	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
3060 	cfg->nlinfo.nlh = nlh;
3061 	cfg->nlinfo.nl_net = net;
3062 
3063 	cfg->nh_family = nhm->nh_family;
3064 	cfg->nh_protocol = nhm->nh_protocol;
3065 	cfg->nh_flags = nhm->nh_flags;
3066 
3067 	if (tb[NHA_ID])
3068 		cfg->nh_id = nla_get_u32(tb[NHA_ID]);
3069 
3070 	if (tb[NHA_FDB]) {
3071 		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
3072 		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
3073 			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
3074 			goto out;
3075 		}
3076 		if (nhm->nh_flags) {
3077 			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
3078 			goto out;
3079 		}
3080 		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
3081 	}
3082 
3083 	if (tb[NHA_GROUP]) {
3084 		if (nhm->nh_family != AF_UNSPEC) {
3085 			NL_SET_ERR_MSG(extack, "Invalid family for group");
3086 			goto out;
3087 		}
3088 		cfg->nh_grp = tb[NHA_GROUP];
3089 
3090 		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
3091 		if (tb[NHA_GROUP_TYPE])
3092 			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
3093 
3094 		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
3095 			NL_SET_ERR_MSG(extack, "Invalid group type");
3096 			goto out;
3097 		}
3098 		err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
3099 					  cfg->nh_grp_type, extack);
3100 		if (err)
3101 			goto out;
3102 
3103 		if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
3104 			err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
3105 						       cfg, extack);
3106 
3107 		if (tb[NHA_HW_STATS_ENABLE])
3108 			cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
3109 
3110 		/* no other attributes should be set */
3111 		goto out;
3112 	}
3113 
3114 	if (tb[NHA_BLACKHOLE]) {
3115 		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
3116 		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
3117 			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
3118 			goto out;
3119 		}
3120 
3121 		cfg->nh_blackhole = 1;
3122 		err = 0;
3123 		goto out;
3124 	}
3125 
3126 	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
3127 		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
3128 		goto out;
3129 	}
3130 
3131 	if (!cfg->nh_fdb && tb[NHA_OIF]) {
3132 		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
3133 		if (cfg->nh_ifindex)
3134 			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
3135 
3136 		if (!cfg->dev) {
3137 			NL_SET_ERR_MSG(extack, "Invalid device index");
3138 			goto out;
3139 		} else if (!(cfg->dev->flags & IFF_UP)) {
3140 			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3141 			err = -ENETDOWN;
3142 			goto out;
3143 		} else if (!netif_carrier_ok(cfg->dev)) {
3144 			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
3145 			err = -ENETDOWN;
3146 			goto out;
3147 		}
3148 	}
3149 
3150 	err = -EINVAL;
3151 	if (tb[NHA_GATEWAY]) {
3152 		struct nlattr *gwa = tb[NHA_GATEWAY];
3153 
3154 		switch (cfg->nh_family) {
3155 		case AF_INET:
3156 			if (nla_len(gwa) != sizeof(u32)) {
3157 				NL_SET_ERR_MSG(extack, "Invalid gateway");
3158 				goto out;
3159 			}
3160 			cfg->gw.ipv4 = nla_get_be32(gwa);
3161 			break;
3162 		case AF_INET6:
3163 			if (nla_len(gwa) != sizeof(struct in6_addr)) {
3164 				NL_SET_ERR_MSG(extack, "Invalid gateway");
3165 				goto out;
3166 			}
3167 			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
3168 			break;
3169 		default:
3170 			NL_SET_ERR_MSG(extack,
3171 				       "Unknown address family for gateway");
3172 			goto out;
3173 		}
3174 	} else {
3175 		/* device only nexthop (no gateway) */
3176 		if (cfg->nh_flags & RTNH_F_ONLINK) {
3177 			NL_SET_ERR_MSG(extack,
3178 				       "ONLINK flag can not be set for nexthop without a gateway");
3179 			goto out;
3180 		}
3181 	}
3182 
3183 	if (tb[NHA_ENCAP]) {
3184 		cfg->nh_encap = tb[NHA_ENCAP];
3185 
3186 		if (!tb[NHA_ENCAP_TYPE]) {
3187 			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
3188 			goto out;
3189 		}
3190 
3191 		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
3192 		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
3193 		if (err < 0)
3194 			goto out;
3195 
3196 	} else if (tb[NHA_ENCAP_TYPE]) {
3197 		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
3198 		goto out;
3199 	}
3200 
3201 	if (tb[NHA_HW_STATS_ENABLE]) {
3202 		NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
3203 		goto out;
3204 	}
3205 
3206 	err = 0;
3207 out:
3208 	return err;
3209 }
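
/* Editor's note: illustrative iproute2 commands that exercise this
 * parser (syntax per ip-nexthop(8); the device name is hypothetical):
 *
 *	ip nexthop add id 1 via 192.0.2.1 dev eth0
 *	ip nexthop add id 2 blackhole
 *	ip nexthop add id 100 group 1,2/2,3
 *
 * "via", "dev" and "blackhole" map to NHA_GATEWAY, NHA_OIF and
 * NHA_BLACKHOLE; "group" maps to NHA_GROUP, with each member given as
 * "id" or "id,weight".
 */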
3210 
3211 /* rtnl */
3212 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3213 			   struct netlink_ext_ack *extack)
3214 {
3215 	struct net *net = sock_net(skb->sk);
3216 	struct nh_config cfg;
3217 	struct nexthop *nh;
3218 	int err;
3219 
3220 	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
3221 	if (!err) {
3222 		nh = nexthop_add(net, &cfg, extack);
3223 		if (IS_ERR(nh))
3224 			err = PTR_ERR(nh);
3225 	}
3226 
3227 	return err;
3228 }
3229 
3230 static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
3231 				struct nlattr **tb, u32 *id, u32 *op_flags,
3232 				struct netlink_ext_ack *extack)
3233 {
3234 	struct nhmsg *nhm = nlmsg_data(nlh);
3235 
3236 	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3237 		NL_SET_ERR_MSG(extack, "Invalid values in header");
3238 		return -EINVAL;
3239 	}
3240 
3241 	if (!tb[NHA_ID]) {
3242 		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
3243 		return -EINVAL;
3244 	}
3245 
3246 	*id = nla_get_u32(tb[NHA_ID]);
3247 	if (!(*id)) {
3248 		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3249 		return -EINVAL;
3250 	}
3251 
3252 	if (op_flags) {
3253 		if (tb[NHA_OP_FLAGS])
3254 			*op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
3255 		else
3256 			*op_flags = 0;
3257 	}
3258 
3259 	return 0;
3260 }
3261 
3262 /* rtnl */
3263 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3264 			   struct netlink_ext_ack *extack)
3265 {
3266 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
3267 	struct net *net = sock_net(skb->sk);
3268 	struct nl_info nlinfo = {
3269 		.nlh = nlh,
3270 		.nl_net = net,
3271 		.portid = NETLINK_CB(skb).portid,
3272 	};
3273 	struct nexthop *nh;
3274 	int err;
3275 	u32 id;
3276 
3277 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3278 			  ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
3279 			  extack);
3280 	if (err < 0)
3281 		return err;
3282 
3283 	err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
3284 	if (err)
3285 		return err;
3286 
3287 	nh = nexthop_find_by_id(net, id);
3288 	if (!nh)
3289 		return -ENOENT;
3290 
3291 	remove_nexthop(net, nh, &nlinfo);
3292 
3293 	return 0;
3294 }
3295 
3296 /* rtnl */
3297 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3298 			   struct netlink_ext_ack *extack)
3299 {
3300 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
3301 	struct net *net = sock_net(in_skb->sk);
3302 	struct sk_buff *skb = NULL;
3303 	struct nexthop *nh;
3304 	u32 op_flags;
3305 	int err;
3306 	u32 id;
3307 
3308 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3309 			  ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
3310 			  extack);
3311 	if (err < 0)
3312 		return err;
3313 
3314 	err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
3315 	if (err)
3316 		return err;
3317 
3318 	err = -ENOBUFS;
3319 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3320 	if (!skb)
3321 		goto out;
3322 
3323 	err = -ENOENT;
3324 	nh = nexthop_find_by_id(net, id);
3325 	if (!nh)
3326 		goto errout_free;
3327 
3328 	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
3329 			   nlh->nlmsg_seq, 0, op_flags);
3330 	if (err < 0) {
3331 		WARN_ON(err == -EMSGSIZE);
3332 		goto errout_free;
3333 	}
3334 
3335 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3336 out:
3337 	return err;
3338 errout_free:
3339 	kfree_skb(skb);
3340 	goto out;
3341 }
3342 
3343 struct nh_dump_filter {
3344 	u32 nh_id;
3345 	int dev_idx;
3346 	int master_idx;
3347 	bool group_filter;
3348 	bool fdb_filter;
3349 	u32 res_bucket_nh_id;
3350 	u32 op_flags;
3351 };
3352 
3353 static bool nh_dump_filtered(struct nexthop *nh,
3354 			     struct nh_dump_filter *filter, u8 family)
3355 {
3356 	const struct net_device *dev;
3357 	const struct nh_info *nhi;
3358 
3359 	if (filter->group_filter && !nh->is_group)
3360 		return true;
3361 
3362 	if (!filter->dev_idx && !filter->master_idx && !family)
3363 		return false;
3364 
3365 	if (nh->is_group)
3366 		return true;
3367 
3368 	nhi = rtnl_dereference(nh->nh_info);
3369 	if (family && nhi->family != family)
3370 		return true;
3371 
3372 	dev = nhi->fib_nhc.nhc_dev;
3373 	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
3374 		return true;
3375 
3376 	if (filter->master_idx) {
3377 		struct net_device *master;
3378 
3379 		if (!dev)
3380 			return true;
3381 
3382 		master = netdev_master_upper_dev_get((struct net_device *)dev);
3383 		if (!master || master->ifindex != filter->master_idx)
3384 			return true;
3385 	}
3386 
3387 	return false;
3388 }
3389 
3390 static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
3391 			       struct nh_dump_filter *filter,
3392 			       struct netlink_ext_ack *extack)
3393 {
3394 	struct nhmsg *nhm;
3395 	u32 idx;
3396 
3397 	if (tb[NHA_OIF]) {
3398 		idx = nla_get_u32(tb[NHA_OIF]);
3399 		if (idx > INT_MAX) {
3400 			NL_SET_ERR_MSG(extack, "Invalid device index");
3401 			return -EINVAL;
3402 		}
3403 		filter->dev_idx = idx;
3404 	}
3405 	if (tb[NHA_MASTER]) {
3406 		idx = nla_get_u32(tb[NHA_MASTER]);
3407 		if (idx > INT_MAX) {
3408 			NL_SET_ERR_MSG(extack, "Invalid master device index");
3409 			return -EINVAL;
3410 		}
3411 		filter->master_idx = idx;
3412 	}
3413 	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
3414 	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
3415 
3416 	nhm = nlmsg_data(nlh);
3417 	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3418 		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
3419 		return -EINVAL;
3420 	}
3421 
3422 	return 0;
3423 }
3424 
3425 static int nh_valid_dump_req(const struct nlmsghdr *nlh,
3426 			     struct nh_dump_filter *filter,
3427 			     struct netlink_callback *cb)
3428 {
3429 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
3430 	int err;
3431 
3432 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3433 			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
3434 			  rtm_nh_policy_dump, cb->extack);
3435 	if (err < 0)
3436 		return err;
3437 
3438 	if (tb[NHA_OP_FLAGS])
3439 		filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
3440 	else
3441 		filter->op_flags = 0;
3442 
3443 	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3444 }
3445 
3446 struct rtm_dump_nh_ctx {
3447 	u32 idx;
3448 };
3449 
3450 static struct rtm_dump_nh_ctx *
3451 rtm_dump_nh_ctx(struct netlink_callback *cb)
3452 {
3453 	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
3454 
3455 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3456 	return ctx;
3457 }
3458 
3459 static int rtm_dump_walk_nexthops(struct sk_buff *skb,
3460 				  struct netlink_callback *cb,
3461 				  struct rb_root *root,
3462 				  struct rtm_dump_nh_ctx *ctx,
3463 				  int (*nh_cb)(struct sk_buff *skb,
3464 					       struct netlink_callback *cb,
3465 					       struct nexthop *nh, void *data),
3466 				  void *data)
3467 {
3468 	struct rb_node *node;
3469 	int s_idx;
3470 	int err;
3471 
3472 	s_idx = ctx->idx;
3473 	for (node = rb_first(root); node; node = rb_next(node)) {
3474 		struct nexthop *nh;
3475 
3476 		nh = rb_entry(node, struct nexthop, rb_node);
3477 		if (nh->id < s_idx)
3478 			continue;
3479 
3480 		ctx->idx = nh->id;
3481 		err = nh_cb(skb, cb, nh, data);
3482 		if (err)
3483 			return err;
3484 	}
3485 
3486 	return 0;
3487 }
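
/* Editor's note: the walk above implements standard netlink dump
 * resumption. When nh_cb() fills the skb and returns non-zero, the id
 * of the entry that did not fit remains in ctx->idx (persisted in
 * cb->ctx between dump invocations); on the next call the
 * "nh->id < s_idx" test skips everything already sent and that entry
 * is retried first.
 */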
3488 
3489 static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
3490 			       struct nexthop *nh, void *data)
3491 {
3492 	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3493 	struct nh_dump_filter *filter = data;
3494 
3495 	if (nh_dump_filtered(nh, filter, nhm->nh_family))
3496 		return 0;
3497 
3498 	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
3499 			    NETLINK_CB(cb->skb).portid,
3500 			    cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
3501 }
3502 
3503 /* rtnl */
3504 static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
3505 {
3506 	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
3507 	struct net *net = sock_net(skb->sk);
3508 	struct rb_root *root = &net->nexthop.rb_root;
3509 	struct nh_dump_filter filter = {};
3510 	int err;
3511 
3512 	err = nh_valid_dump_req(cb->nlh, &filter, cb);
3513 	if (err < 0)
3514 		return err;
3515 
3516 	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
3517 				     &rtm_dump_nexthop_cb, &filter);
3518 
3519 	cb->seq = net->nexthop.seq;
3520 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3521 	return err;
3522 }
3523 
3524 static struct nexthop *
3525 nexthop_find_group_resilient(struct net *net, u32 id,
3526 			     struct netlink_ext_ack *extack)
3527 {
3528 	struct nh_group *nhg;
3529 	struct nexthop *nh;
3530 
3531 	nh = nexthop_find_by_id(net, id);
3532 	if (!nh)
3533 		return ERR_PTR(-ENOENT);
3534 
3535 	if (!nh->is_group) {
3536 		NL_SET_ERR_MSG(extack, "Not a nexthop group");
3537 		return ERR_PTR(-EINVAL);
3538 	}
3539 
3540 	nhg = rtnl_dereference(nh->nh_grp);
3541 	if (!nhg->resilient) {
3542 		NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
3543 		return ERR_PTR(-EINVAL);
3544 	}
3545 
3546 	return nh;
3547 }
3548 
3549 static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
3550 			      struct netlink_ext_ack *extack)
3551 {
3552 	u32 idx;
3553 
3554 	if (attr) {
3555 		idx = nla_get_u32(attr);
3556 		if (!idx) {
3557 			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3558 			return -EINVAL;
3559 		}
3560 		*nh_id_p = idx;
3561 	} else {
3562 		*nh_id_p = 0;
3563 	}
3564 
3565 	return 0;
3566 }
3567 
3568 static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
3569 				    struct nh_dump_filter *filter,
3570 				    struct netlink_callback *cb)
3571 {
3572 	struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
3573 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
3574 	int err;
3575 
3576 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3577 			  ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
3578 			  rtm_nh_policy_dump_bucket, NULL);
3579 	if (err < 0)
3580 		return err;
3581 
3582 	err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
3583 	if (err)
3584 		return err;
3585 
3586 	if (tb[NHA_RES_BUCKET]) {
3587 		size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;
3588 
3589 		err = nla_parse_nested(res_tb, max,
3590 				       tb[NHA_RES_BUCKET],
3591 				       rtm_nh_res_bucket_policy_dump,
3592 				       cb->extack);
3593 		if (err < 0)
3594 			return err;
3595 
3596 		err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
3597 					 &filter->res_bucket_nh_id,
3598 					 cb->extack);
3599 		if (err)
3600 			return err;
3601 	}
3602 
3603 	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3604 }
3605 
3606 struct rtm_dump_res_bucket_ctx {
3607 	struct rtm_dump_nh_ctx nh;
3608 	u16 bucket_index;
3609 };
3610 
3611 static struct rtm_dump_res_bucket_ctx *
3612 rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
3613 {
3614 	struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
3615 
3616 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3617 	return ctx;
3618 }
3619 
3620 struct rtm_dump_nexthop_bucket_data {
3621 	struct rtm_dump_res_bucket_ctx *ctx;
3622 	struct nh_dump_filter filter;
3623 };
3624 
3625 static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
3626 				      struct netlink_callback *cb,
3627 				      struct nexthop *nh,
3628 				      struct rtm_dump_nexthop_bucket_data *dd)
3629 {
3630 	u32 portid = NETLINK_CB(cb->skb).portid;
3631 	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3632 	struct nh_res_table *res_table;
3633 	struct nh_group *nhg;
3634 	u16 bucket_index;
3635 	int err;
3636 
3637 	nhg = rtnl_dereference(nh->nh_grp);
3638 	res_table = rtnl_dereference(nhg->res_table);
3639 	for (bucket_index = dd->ctx->bucket_index;
3640 	     bucket_index < res_table->num_nh_buckets;
3641 	     bucket_index++) {
3642 		struct nh_res_bucket *bucket;
3643 		struct nh_grp_entry *nhge;
3644 
3645 		bucket = &res_table->nh_buckets[bucket_index];
3646 		nhge = rtnl_dereference(bucket->nh_entry);
3647 		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
3648 			continue;
3649 
3650 		if (dd->filter.res_bucket_nh_id &&
3651 		    dd->filter.res_bucket_nh_id != nhge->nh->id)
3652 			continue;
3653 
3654 		dd->ctx->bucket_index = bucket_index;
3655 		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
3656 					 RTM_NEWNEXTHOPBUCKET, portid,
3657 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3658 					 cb->extack);
3659 		if (err)
3660 			return err;
3661 	}
3662 
3663 	dd->ctx->bucket_index = 0;
3664 
3665 	return 0;
3666 }
3667 
3668 static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
3669 				      struct netlink_callback *cb,
3670 				      struct nexthop *nh, void *data)
3671 {
3672 	struct rtm_dump_nexthop_bucket_data *dd = data;
3673 	struct nh_group *nhg;
3674 
3675 	if (!nh->is_group)
3676 		return 0;
3677 
3678 	nhg = rtnl_dereference(nh->nh_grp);
3679 	if (!nhg->resilient)
3680 		return 0;
3681 
3682 	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
3683 }
3684 
3685 /* rtnl */
3686 static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
3687 				   struct netlink_callback *cb)
3688 {
3689 	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
3690 	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
3691 	struct net *net = sock_net(skb->sk);
3692 	struct nexthop *nh;
3693 	int err;
3694 
3695 	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
3696 	if (err)
3697 		return err;
3698 
3699 	if (dd.filter.nh_id) {
3700 		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
3701 						  cb->extack);
3702 		if (IS_ERR(nh))
3703 			return PTR_ERR(nh);
3704 		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
3705 	} else {
3706 		struct rb_root *root = &net->nexthop.rb_root;
3707 
3708 		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
3709 					     &rtm_dump_nexthop_bucket_cb, &dd);
3710 	}
3711 
3712 	cb->seq = net->nexthop.seq;
3713 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3714 	return err;
3715 }
3716 
3717 static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
3718 					      u16 *bucket_index,
3719 					      struct netlink_ext_ack *extack)
3720 {
3721 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
3722 	int err;
3723 
3724 	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
3725 			       res, rtm_nh_res_bucket_policy_get, extack);
3726 	if (err < 0)
3727 		return err;
3728 
3729 	if (!tb[NHA_RES_BUCKET_INDEX]) {
3730 		NL_SET_ERR_MSG(extack, "Bucket index is missing");
3731 		return -EINVAL;
3732 	}
3733 
3734 	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
3735 	return 0;
3736 }
3737 
3738 static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
3739 				   u32 *id, u16 *bucket_index,
3740 				   struct netlink_ext_ack *extack)
3741 {
3742 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
3743 	int err;
3744 
3745 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3746 			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
3747 			  rtm_nh_policy_get_bucket, extack);
3748 	if (err < 0)
3749 		return err;
3750 
3751 	err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
3752 	if (err)
3753 		return err;
3754 
3755 	if (!tb[NHA_RES_BUCKET]) {
3756 		NL_SET_ERR_MSG(extack, "Bucket information is missing");
3757 		return -EINVAL;
3758 	}
3759 
3760 	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
3761 						 bucket_index, extack);
3762 	if (err)
3763 		return err;
3764 
3765 	return 0;
3766 }
3767 
3768 /* rtnl */
3769 static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3770 				  struct netlink_ext_ack *extack)
3771 {
3772 	struct net *net = sock_net(in_skb->sk);
3773 	struct nh_res_table *res_table;
3774 	struct sk_buff *skb = NULL;
3775 	struct nh_group *nhg;
3776 	struct nexthop *nh;
3777 	u16 bucket_index;
3778 	int err;
3779 	u32 id;
3780 
3781 	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
3782 	if (err)
3783 		return err;
3784 
3785 	nh = nexthop_find_group_resilient(net, id, extack);
3786 	if (IS_ERR(nh))
3787 		return PTR_ERR(nh);
3788 
3789 	nhg = rtnl_dereference(nh->nh_grp);
3790 	res_table = rtnl_dereference(nhg->res_table);
3791 	if (bucket_index >= res_table->num_nh_buckets) {
3792 		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
3793 		return -ENOENT;
3794 	}
3795 
3796 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3797 	if (!skb)
3798 		return -ENOBUFS;
3799 
3800 	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
3801 				 bucket_index, RTM_NEWNEXTHOPBUCKET,
3802 				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
3803 				 0, extack);
3804 	if (err < 0) {
3805 		WARN_ON(err == -EMSGSIZE);
3806 		goto errout_free;
3807 	}
3808 
3809 	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3810 
3811 errout_free:
3812 	kfree_skb(skb);
3813 	return err;
3814 }
3815 
3816 static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
3817 {
3818 	unsigned int hash = nh_dev_hashfn(dev->ifindex);
3819 	struct net *net = dev_net(dev);
3820 	struct hlist_head *head = &net->nexthop.devhash[hash];
3821 	struct hlist_node *n;
3822 	struct nh_info *nhi;
3823 
3824 	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
3825 		if (nhi->fib_nhc.nhc_dev == dev) {
3826 			if (nhi->family == AF_INET)
3827 				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
3828 						   orig_mtu);
3829 		}
3830 	}
3831 }
3832 
3833 /* rtnl */
3834 static int nh_netdev_event(struct notifier_block *this,
3835 			   unsigned long event, void *ptr)
3836 {
3837 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3838 	struct netdev_notifier_info_ext *info_ext;
3839 
3840 	switch (event) {
3841 	case NETDEV_DOWN:
3842 	case NETDEV_UNREGISTER:
3843 		nexthop_flush_dev(dev, event);
3844 		break;
3845 	case NETDEV_CHANGE:
3846 		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
3847 			nexthop_flush_dev(dev, event);
3848 		break;
3849 	case NETDEV_CHANGEMTU:
3850 		info_ext = ptr;
3851 		nexthop_sync_mtu(dev, info_ext->ext.mtu);
3852 		rt_cache_flush(dev_net(dev));
3853 		break;
3854 	}
3855 	return NOTIFY_DONE;
3856 }
3857 
3858 static struct notifier_block nh_netdev_notifier = {
3859 	.notifier_call = nh_netdev_event,
3860 };
3861 
3862 static int nexthops_dump(struct net *net, struct notifier_block *nb,
3863 			 enum nexthop_event_type event_type,
3864 			 struct netlink_ext_ack *extack)
3865 {
3866 	struct rb_root *root = &net->nexthop.rb_root;
3867 	struct rb_node *node;
3868 	int err = 0;
3869 
3870 	for (node = rb_first(root); node; node = rb_next(node)) {
3871 		struct nexthop *nh;
3872 
3873 		nh = rb_entry(node, struct nexthop, rb_node);
3874 		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
3875 		if (err)
3876 			break;
3877 	}
3878 
3879 	return err;
3880 }
3881 
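/* Register @nb on the per-netns nexthop notifier chain. Existing
 * nexthops are first replayed as NEXTHOP_EVENT_REPLACE; a failed
 * replay aborts the registration.
 *
 * Minimal caller sketch (hypothetical driver code, error handling
 * elided):
 *
 *	static int my_nh_event(struct notifier_block *nb,
 *			       unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		// react to event (e.g. NEXTHOP_EVENT_REPLACE) ...
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nh_nb = {
 *		.notifier_call = my_nh_event,
 *	};
 *
 *	err = register_nexthop_notifier(net, &my_nh_nb, extack);
 */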
3882 int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
3883 			      struct netlink_ext_ack *extack)
3884 {
3885 	int err;
3886 
3887 	rtnl_lock();
3888 	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
3889 	if (err)
3890 		goto unlock;
3891 	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
3892 					       nb);
3893 unlock:
3894 	rtnl_unlock();
3895 	return err;
3896 }
3897 EXPORT_SYMBOL(register_nexthop_notifier);
3898 
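/* Variant for callers that already hold RTNL; on success the departing
 * listener is sent NEXTHOP_EVENT_DEL for each nexthop so it can tear
 * down any per-nexthop state it holds.
 */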
3899 int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3900 {
3901 	int err;
3902 
3903 	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
3904 						 nb);
3905 	if (!err)
3906 		nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
3907 	return err;
3908 }
3909 EXPORT_SYMBOL(__unregister_nexthop_notifier);
3910 
3911 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3912 {
3913 	int err;
3914 
3915 	rtnl_lock();
3916 	err = __unregister_nexthop_notifier(net, nb);
3917 	rtnl_unlock();
3918 	return err;
3919 }
3920 EXPORT_SYMBOL(unregister_nexthop_notifier);
3921 
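/* Driver API: reflect hardware offload/trap state on nexthop @id by
 * rewriting its RTNH_F_OFFLOAD/RTNH_F_TRAP flags. Runs under the RCU
 * read lock; an unknown ID is silently ignored.
 */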
3922 void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
3923 {
3924 	struct nexthop *nexthop;
3925 
3926 	rcu_read_lock();
3927 
3928 	nexthop = nexthop_find_by_id(net, id);
3929 	if (!nexthop)
3930 		goto out;
3931 
3932 	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3933 	if (offload)
3934 		nexthop->nh_flags |= RTNH_F_OFFLOAD;
3935 	if (trap)
3936 		nexthop->nh_flags |= RTNH_F_TRAP;
3937 
3938 out:
3939 	rcu_read_unlock();
3940 }
3941 EXPORT_SYMBOL(nexthop_set_hw_flags);
3942 
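/* As nexthop_set_hw_flags(), but for one bucket of a resilient group.
 * IDs that do not name a resilient group, and out-of-range bucket
 * indices, are silently ignored.
 */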
3943 void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
3944 				 bool offload, bool trap)
3945 {
3946 	struct nh_res_table *res_table;
3947 	struct nh_res_bucket *bucket;
3948 	struct nexthop *nexthop;
3949 	struct nh_group *nhg;
3950 
3951 	rcu_read_lock();
3952 
3953 	nexthop = nexthop_find_by_id(net, id);
3954 	if (!nexthop || !nexthop->is_group)
3955 		goto out;
3956 
3957 	nhg = rcu_dereference(nexthop->nh_grp);
3958 	if (!nhg->resilient)
3959 		goto out;
3960 
3961 	res_table = rcu_dereference(nhg->res_table);
3962 	if (bucket_index >= res_table->num_nh_buckets)
3963 		goto out;
3964 
3965 	bucket = &res_table->nh_buckets[bucket_index];
3966 	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3967 	if (offload)
3968 		bucket->nh_flags |= RTNH_F_OFFLOAD;
3969 	if (trap)
3970 		bucket->nh_flags |= RTNH_F_TRAP;
3971 
3972 out:
3973 	rcu_read_unlock();
3974 }
3975 EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
3976 
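/* Driver API: merge a hardware activity bitmap into resilient group
 * @id, marking each set bucket busy so the upkeep logic does not treat
 * it as idle. The caller's bitmap must cover exactly the group's
 * bucket count.
 */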
3977 void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
3978 				     unsigned long *activity)
3979 {
3980 	struct nh_res_table *res_table;
3981 	struct nexthop *nexthop;
3982 	struct nh_group *nhg;
3983 	u16 i;
3984 
3985 	rcu_read_lock();
3986 
3987 	nexthop = nexthop_find_by_id(net, id);
3988 	if (!nexthop || !nexthop->is_group)
3989 		goto out;
3990 
3991 	nhg = rcu_dereference(nexthop->nh_grp);
3992 	if (!nhg->resilient)
3993 		goto out;
3994 
3995 	/* Instead of silently ignoring some buckets, demand that the sizes
3996 	 * be the same.
3997 	 */
3998 	res_table = rcu_dereference(nhg->res_table);
3999 	if (num_buckets != res_table->num_nh_buckets)
4000 		goto out;
4001 
4002 	for (i = 0; i < num_buckets; i++) {
4003 		if (test_bit(i, activity))
4004 			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
4005 	}
4006 
4007 out:
4008 	rcu_read_unlock();
4009 }
4010 EXPORT_SYMBOL(nexthop_res_grp_activity_update);
4011 
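/* Pernet teardown, batched under a single RTNL hold: flush all
 * nexthops of each exiting netns.
 */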
4012 static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
4013 						   struct list_head *dev_to_kill)
4014 {
4015 	struct net *net;
4016 
4017 	ASSERT_RTNL();
4018 	list_for_each_entry(net, net_list, exit_list)
4019 		flush_all_nexthops(net);
4020 }
4021 
4022 static void __net_exit nexthop_net_exit(struct net *net)
4023 {
4024 	kfree(net->nexthop.devhash);
4025 	net->nexthop.devhash = NULL;
4026 }
4027 
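/* Pernet init: an empty rb-tree of nexthops keyed by ID, a hash of
 * nexthops by device for fast per-device flushing, and the notifier
 * chain.
 */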
4028 static int __net_init nexthop_net_init(struct net *net)
4029 {
4030 	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;
4031 
4032 	net->nexthop.rb_root = RB_ROOT;
4033 	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
4034 	if (!net->nexthop.devhash)
4035 		return -ENOMEM;
4036 	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
4037 
4038 	return 0;
4039 }
4040 
4041 static struct pernet_operations nexthop_net_ops = {
4042 	.init = nexthop_net_init,
4043 	.exit = nexthop_net_exit,
4044 	.exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
4045 };
4046 
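/* Wire up the pernet ops, the netdevice notifier, and the rtnetlink
 * message handlers. The PF_INET/PF_INET6 registrations mirror the
 * PF_UNSPEC ones, presumably so family-qualified requests from user
 * space are honoured as well.
 */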
4047 static int __init nexthop_init(void)
4048 {
4049 	register_pernet_subsys(&nexthop_net_ops);
4050 
4051 	register_netdevice_notifier(&nh_netdev_notifier);
4052 
4053 	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4054 	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
4055 	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
4056 		      rtm_dump_nexthop, 0);
4057 
4058 	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4059 	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
4060 
4061 	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4062 	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
4063 
4064 	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
4065 		      rtm_dump_nexthop_bucket, 0);
4066 
4067 	return 0;
4068 }
4069 subsys_initcall(nexthop_init);
4070