// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
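/* A 256-bucket hash table of nexthops keyed by the ifindex of their
 * device; see nh_dev_hashfn() and nexthop_devhash_add() below.
 */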

#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS |		\
			       NHA_OP_FLAG_DUMP_HW_STATS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_policy_del[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};

static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->id = nhi->nh_parent->id;
	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}

static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;
	info->nh_grp->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;
	info->nh_res_table->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}

static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))
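/* For instance, replace_nexthop_grp_res() below follows exactly this
 * scheme: nh_res_table_cancel_upkeep(), then bucket migration and
 * rebalance, then nh_res_table_upkeep() to reschedule the delayed work.
 */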

static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nh->id,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}
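/* Worked example (values picked for illustration): with NH_DEV_HASHBITS
 * of 8, an ifindex of 0x012345 folds to 0x45 ^ 0x23 ^ 0x01 = 0x67, so
 * all 24 low bits of the ifindex influence the 8-bit bucket index.
 */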

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		free_percpu(nhge->stats);
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}
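/* The base sequence number is bumped on every change to the nexthop
 * tree; skipping 0 presumably lets readers treat 0 as "never sampled".
 */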

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);
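/* A minimal usage sketch (hypothetical caller): since no reference is
 * taken, the lookup must be wrapped in RCU (or done under RTNL):
 *
 *	rcu_read_lock();
 *	nh = nexthop_find_by_id(net, id);
 *	if (nh)
 *		... use nh without sleeping ...
 *	rcu_read_unlock();
 */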

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}
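/* Note: if the search wraps all the way around to id_start without
 * finding a free id, 0 is returned, which is never a valid nexthop id.
 */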

static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *cpu_stats;

	cpu_stats = get_cpu_ptr(nhge->stats);
	u64_stats_update_begin(&cpu_stats->syncp);
	u64_stats_inc(&cpu_stats->packets);
	u64_stats_update_end(&cpu_stats->syncp);
	put_cpu_ptr(cpu_stats);
}

static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
				    u64 *ret_packets)
{
	int i;

	*ret_packets = 0;

	for_each_possible_cpu(i) {
		struct nh_grp_entry_stats *cpu_stats;
		unsigned int start;
		u64 packets;

		cpu_stats = per_cpu_ptr(nhge->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			packets = u64_stats_read(&cpu_stats->packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		*ret_packets += packets;
	}
}
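/* The u64_stats_fetch_begin()/retry() loop re-reads a CPU's counter
 * until it observes a snapshot that was not concurrently updated, which
 * is what makes the 64-bit read safe on 32-bit hosts.
 */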

static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
					 const struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	info->id = nh->id;
	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
	info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
						    stats, nhg->num_nh),
					GFP_KERNEL);
	if (!info->nh_grp_hw_stats)
		return -ENOMEM;

	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
	}

	return 0;
}

static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_grp_hw_stats);
}

void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
				  unsigned int nh_idx,
				  u64 delta_packets)
{
	info->hw_stats_used = true;
	info->stats[nh_idx].packets += delta_packets;
}
EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);

static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
					 struct nh_notifier_info *info)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
	}
}

static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
{
	struct nh_notifier_info info = {
		.net = nh->net,
	};
	struct net *net = nh->net;
	int err;

	if (nexthop_notifiers_is_empty(net)) {
		*hw_stats_used = false;
		return 0;
	}

	err = nh_notifier_grp_hw_stats_init(&info, nh);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
					   &info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	nh_grp_hw_stats_apply_update(nh, &info);
	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;

	nh_notifier_grp_hw_stats_fini(&info);
	return notifier_to_errno(err);
}

static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
					struct nh_grp_entry *nhge,
					u32 op_flags)
{
	struct nlattr *nest;
	u64 packets;

	nh_grp_entry_stats_read(nhge, &packets);

	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
			 packets + nhge->packets_hw))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
			 nhge->packets_hw))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
				  u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nlattr *nest;
	bool hw_stats_used;
	int err;
	int i;

	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
		goto err_out;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nhg->hw_stats) {
		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
		if (err)
			goto out;

		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
			goto err_out;
	}

	nest = nla_nest_start(skb, NHA_GROUP_STATS);
	if (!nest)
		goto err_out;

	for (i = 0; i < nhg->num_nh; i++)
		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
						 op_flags))
			goto cancel_out;

	nla_nest_end(skb, nest);
	return 0;

cancel_out:
	nla_nest_cancel(skb, nest);
err_out:
	err = -EMSGSIZE;
out:
	return err;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
			    u32 op_flags, u32 *resp_op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	u16 weight;
	int i;

	*resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
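	/* The effective weight 1..65536 is encoded as (weight - 1) split
	 * across the 8-bit weight and weight_high fields; legacy readers
	 * that only look at the low weight byte see its truncation.
	 */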
	for (i = 0; i < nhg->num_nh; ++i) {
		weight = nhg->nh_entries[i].weight - 1;

		*p++ = (struct nexthop_grp) {
			.id = nhg->nh_entries[i].nh->id,
			.weight = weight,
			.weight_high = weight >> 8,
		};
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
	     nla_put_nh_group_stats(skb, nh, op_flags)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags,
			u32 op_flags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
		u32 resp_op_flags = 0;

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
		    nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh) +
		      nla_total_size(4) +	/* NHA_OP_FLAGS */
		      0;
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}

static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int i, j;
	u8 nhg_fdb = 0;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved field in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nexthop_grp_weight(&nhg[i]) == 0) {
			/* 0xffff got passed in, representing weight of 0x10000,
			 * which is too heavy.
			 */
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	if (tb[NHA_FDB])
		nhg_fdb = 1;
	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		switch (i) {
		case NHA_HW_STATS_ENABLE:
		case NHA_FDB:
			continue;
		case NHA_RES_GROUP:
			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
				continue;
			break;
		}
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}

static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nh_grp_entry *nhge0 = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* Nexthop groups always check whether an entry is good and
		 * do not rely on a sysctl for this behavior.
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!nhge0)
			nhge0 = nhge;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	if (!nhge0)
		nhge0 = &nhg->nh_entries[0];
	nh_grp_entry_stats_inc(nhge0);
	return nhge0->nh;
}
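/* If no live entry owned the hash, the first entry that looked good
 * (nhge0) is used as a fallback, or entry 0 if none did, so that a
 * nexthop is always returned.
 */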

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	nh_grp_entry_stats_inc(nhge);
	return nhge->nh;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* If the existing nexthop has IPv6 routes linked to it, we need to
 * verify that this new spec works with IPv6 as well.
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}

static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}

static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}

static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * either the idle timer or the unbalanced timer has expired.
	 */

	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
	if (time_after_eq(now, idle_point)) {
		/* The bucket is idle. We _can_ migrate: unset force. */
		*force = false;
		return true;
	}

	/* Unbalanced timer of 0 means "never force". */
	if (res_table->unbalanced_timer) {
		unsigned long unb_point;

		unb_point = nh_res_table_unb_point(res_table);
		if (time_after(now, unb_point)) {
			/* The bucket is not idle, but the unbalanced timer
			 * expired. We _can_ migrate, but set force anyway,
			 * so that drivers know to ignore activity reports
			 * from the HW.
			 */
			*force = true;
			return true;
		}

		nh_res_time_set_deadline(unb_point, deadline);
	}

	nh_res_time_set_deadline(idle_point, deadline);
	return false;
}
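/* In short: an unoccupied bucket must migrate (forced); a bucket owned
 * by an underweight or balanced entry never migrates; a bucket owned by
 * an overweight entry migrates once idle (unforced) or, if configured,
 * once the unbalanced timer fires (forced).
 */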

static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
				  u16 bucket_index, bool notify,
				  bool notify_nl, bool force)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *new_nhge;
	struct netlink_ext_ack extack;
	int err;

	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
					    struct nh_grp_entry,
					    res.uw_nh_entry);
	if (WARN_ON_ONCE(!new_nhge))
		/* If this function is called, "bucket" is either not
		 * occupied, or it belongs to a next hop that is
		 * overweight. In either case, there ought to be a
		 * corresponding underweight next hop.
		 */
		return false;

	if (notify) {
		struct nh_grp_entry *old_nhge;

		old_nhge = nh_res_dereference(bucket->nh_entry);
		err = call_nexthop_res_bucket_notifiers(res_table->net,
							res_table->nhg_id,
							bucket_index, force,
							old_nhge->nh,
							new_nhge->nh, &extack);
		if (err) {
			pr_err_ratelimited("%s\n", extack._msg);
			if (!force)
				return false;
			/* It is not possible to veto a forced replacement, so
			 * just clear the hardware flags from the nexthop
			 * bucket to indicate to user space that this bucket is
			 * not correctly populated in hardware.
			 */
			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
		}
	}

	nh_res_bucket_set_nh(bucket, new_nhge);
	nh_res_bucket_set_idle(res_table, bucket);

	if (notify_nl)
		nexthop_bucket_notify(res_table, bucket_index);

	if (nh_res_nhge_is_balanced(new_nhge))
		list_del(&new_nhge->res.uw_nh_entry);
	return true;
}

#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)

static void nh_res_table_upkeep(struct nh_res_table *res_table,
				bool notify, bool notify_nl)
{
	unsigned long now = jiffies;
	unsigned long deadline;
	u16 i;

	/* Deadline is the next time that upkeep should be run. It is the
	 * earliest time at which one of the buckets might be migrated.
	 * Start at the most pessimistic estimate: either unbalanced_timer
	 * from now, or if there is none, idle_timer from now. For each
	 * encountered time point, call nh_res_time_set_deadline() to
	 * refine the estimate.
	 */
	if (res_table->unbalanced_timer)
		deadline = now + res_table->unbalanced_timer;
	else
		deadline = now + res_table->idle_timer;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		bool force;

		if (nh_res_bucket_should_migrate(res_table, bucket,
						 &deadline, &force)) {
			if (!nh_res_bucket_migrate(res_table, i, notify,
						   notify_nl, force)) {
				unsigned long idle_point;
				/* A driver can override the migration
				 * decision if the HW reports that the
				 * bucket is actually not idle. Therefore
				 * re-mark the bucket as busy and update
				 * the deadline.
				 */
				nh_res_bucket_set_busy(bucket);
				idle_point = nh_res_bucket_idle_point(res_table,
								      bucket,
								      now);
				nh_res_time_set_deadline(idle_point, &deadline);
			}
		}
	}

	/* If the group is still unbalanced, schedule the next upkeep to
	 * either the deadline computed above, or the minimum deadline,
	 * whichever comes later.
	 */
	if (!nh_res_table_is_balanced(res_table)) {
		unsigned long now = jiffies;
		unsigned long min_deadline;

		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
		if (time_before(deadline, min_deadline))
			deadline = min_deadline;

		queue_delayed_work(system_power_efficient_wq,
				   &res_table->upkeep_dw, deadline - now);
	}
}

static void nh_res_table_upkeep_dw(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct nh_res_table *res_table;

	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
	nh_res_table_upkeep(res_table, true, true);
}

static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
{
	cancel_delayed_work_sync(&res_table->upkeep_dw);
}

static void nh_res_group_rebalance(struct nh_group *nhg,
				   struct nh_res_table *res_table)
{
	u16 prev_upper_bound = 0;
	u32 total = 0;
	u32 w = 0;
	int i;

	INIT_LIST_HEAD(&res_table->uw_nh_entries);

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		u16 upper_bound;
		u64 btw;

		w += nhge->weight;
		btw = ((u64)res_table->num_nh_buckets) * w;
		upper_bound = DIV_ROUND_CLOSEST_ULL(btw, total);
		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
		prev_upper_bound = upper_bound;

		if (nh_res_nhge_is_uw(nhge)) {
			if (list_empty(&res_table->uw_nh_entries))
				res_table->unbalanced_since = jiffies;
			list_add(&nhge->res.uw_nh_entry,
				 &res_table->uw_nh_entries);
		}
	}
}
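/* Worked example: with 8 buckets and weights {1, 3}, total is 4;
 * entry 0: btw = 8 * 1, upper_bound = 2, wants_buckets = 2 - 0 = 2;
 * entry 1: btw = 8 * 4, upper_bound = 8, wants_buckets = 8 - 2 = 6.
 * Any entry holding fewer buckets than it wants is put on the
 * underweight list, which starts the unbalanced clock.
 */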
1923 
1924 /* Migrate buckets in res_table so that they reference NHGEs from NHG with
1925  * the right NH ID. Mark those buckets that do not have a corresponding NHGE
1926  * entry in NHG as not occupied.
1927  */
1928 static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1929 					 struct nh_group *nhg)
1930 {
1931 	u16 i;
1932 
1933 	for (i = 0; i < res_table->num_nh_buckets; i++) {
1934 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1935 		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1936 		bool found = false;
1937 		int j;
1938 
1939 		for (j = 0; j < nhg->num_nh; j++) {
1940 			struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1941 
1942 			if (nhge->nh->id == id) {
1943 				nh_res_bucket_set_nh(bucket, nhge);
1944 				found = true;
1945 				break;
1946 			}
1947 		}
1948 
1949 		if (!found)
1950 			nh_res_bucket_unset_nh(bucket);
1951 	}
1952 }
1953 
1954 static void replace_nexthop_grp_res(struct nh_group *oldg,
1955 				    struct nh_group *newg)
1956 {
1957 	/* For NH group replacement, the new NHG might only have a stub
1958 	 * hash table with 0 buckets, because the number of buckets was not
1959 	 * specified. For NH removal, oldg and newg both reference the same
1960 	 * res_table. So in any case, in the following, we want to work
1961 	 * with oldg->res_table.
1962 	 */
1963 	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1964 	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1965 	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1966 
1967 	nh_res_table_cancel_upkeep(old_res_table);
1968 	nh_res_table_migrate_buckets(old_res_table, newg);
1969 	nh_res_group_rebalance(newg, old_res_table);
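	/* If the group was unbalanced before the rebalance and still is,
	 * keep the original timestamp so that the replacement does not
	 * restart the unbalanced timer.
	 */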
1970 	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1971 		old_res_table->unbalanced_since = prev_unbalanced_since;
1972 	nh_res_table_upkeep(old_res_table, true, false);
1973 }
1974 
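/* Recompute each entry's hash-threshold upper bound: the cumulative
 * weight scaled into the 2^31 hash space. A flow selects the first
 * entry whose upper bound is >= its hash.
 */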
1975 static void nh_hthr_group_rebalance(struct nh_group *nhg)
1976 {
1977 	u32 total = 0;
1978 	u32 w = 0;
1979 	int i;
1980 
1981 	for (i = 0; i < nhg->num_nh; ++i)
1982 		total += nhg->nh_entries[i].weight;
1983 
1984 	for (i = 0; i < nhg->num_nh; ++i) {
1985 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1986 		u32 upper_bound;
1987 
1988 		w += nhge->weight;
1989 		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
1990 		atomic_set(&nhge->hthr.upper_bound, upper_bound);
1991 	}
1992 }
1993 
1994 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
1995 				struct nl_info *nlinfo)
1996 {
1997 	struct nh_grp_entry *nhges, *new_nhges;
1998 	struct nexthop *nhp = nhge->nh_parent;
1999 	struct netlink_ext_ack extack;
2000 	struct nexthop *nh = nhge->nh;
2001 	struct nh_group *nhg, *newg;
2002 	int i, j, err;
2003 
2004 	WARN_ON(!nh);
2005 
2006 	nhg = rtnl_dereference(nhp->nh_grp);
2007 	newg = nhg->spare;
2008 
2009 	/* last entry: keep it visible and remove the parent group instead */
2010 	if (nhg->num_nh == 1) {
2011 		remove_nexthop(net, nhp, nlinfo);
2012 		return;
2013 	}
2014 
2015 	newg->has_v4 = false;
2016 	newg->is_multipath = nhg->is_multipath;
2017 	newg->hash_threshold = nhg->hash_threshold;
2018 	newg->resilient = nhg->resilient;
2019 	newg->fdb_nh = nhg->fdb_nh;
2020 	newg->num_nh = nhg->num_nh;
2021 
2022 	/* copy old entries to new except the one getting removed */
2023 	nhges = nhg->nh_entries;
2024 	new_nhges = newg->nh_entries;
2025 	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
2026 		struct nh_info *nhi;
2027 
2028 		/* current nexthop getting removed */
2029 		if (nhg->nh_entries[i].nh == nh) {
2030 			newg->num_nh--;
2031 			continue;
2032 		}
2033 
2034 		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2035 		if (nhi->family == AF_INET)
2036 			newg->has_v4 = true;
2037 
2038 		list_del(&nhges[i].nh_list);
2039 		new_nhges[j].stats = nhges[i].stats;
2040 		new_nhges[j].nh_parent = nhges[i].nh_parent;
2041 		new_nhges[j].nh = nhges[i].nh;
2042 		new_nhges[j].weight = nhges[i].weight;
2043 		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
2044 		j++;
2045 	}
2046 
2047 	if (newg->hash_threshold)
2048 		nh_hthr_group_rebalance(newg);
2049 	else if (newg->resilient)
2050 		replace_nexthop_grp_res(nhg, newg);
2051 
2052 	rcu_assign_pointer(nhp->nh_grp, newg);
2053 
2054 	list_del(&nhge->nh_list);
2055 	free_percpu(nhge->stats);
2056 	nexthop_put(nhge->nh);
2057 
2058 	/* Removal of a NH from a resilient group is notified through
2059 	 * bucket notifications.
2060 	 */
2061 	if (newg->hash_threshold) {
2062 		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
2063 					     &extack);
2064 		if (err)
2065 			pr_err("%s\n", extack._msg);
2066 	}
2067 
2068 	if (nlinfo)
2069 		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
2070 }
2071 
2072 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
2073 				       struct nl_info *nlinfo)
2074 {
2075 	struct nh_grp_entry *nhge, *tmp;
2076 
2077 	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
2078 		remove_nh_grp_entry(net, nhge, nlinfo);
2079 
2080 	/* make sure all see the newly published array before releasing rtnl */
2081 	synchronize_net();
2082 }
2083 
2084 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
2085 {
2086 	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
2087 	struct nh_res_table *res_table;
2088 	int i, num_nh = nhg->num_nh;
2089 
2090 	for (i = 0; i < num_nh; ++i) {
2091 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2092 
2093 		if (WARN_ON(!nhge->nh))
2094 			continue;
2095 
2096 		list_del_init(&nhge->nh_list);
2097 	}
2098 
2099 	if (nhg->resilient) {
2100 		res_table = rtnl_dereference(nhg->res_table);
2101 		nh_res_table_cancel_upkeep(res_table);
2102 	}
2103 }
2104 
2105 /* not called for nexthop replace */
2106 static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
2107 {
2108 	struct fib6_info *f6i, *tmp;
2109 	bool do_flush = false;
2110 	struct fib_info *fi;
2111 
2112 	list_for_each_entry(fi, &nh->fi_list, nh_list) {
2113 		fi->fib_flags |= RTNH_F_DEAD;
2114 		do_flush = true;
2115 	}
2116 	if (do_flush)
2117 		fib_flush(net);
2118 
2119 	/* ip6_del_rt removes the entry from this list hence the _safe */
2120 	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
2121 		/* __ip6_del_rt does a release, so do a hold here */
2122 		fib6_info_hold(f6i);
2123 		ipv6_stub->ip6_del_rt(net, f6i,
2124 				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
2125 	}
2126 }
2127 
2128 static void __remove_nexthop(struct net *net, struct nexthop *nh,
2129 			     struct nl_info *nlinfo)
2130 {
2131 	__remove_nexthop_fib(net, nh);
2132 
2133 	if (nh->is_group) {
2134 		remove_nexthop_group(nh, nlinfo);
2135 	} else {
2136 		struct nh_info *nhi;
2137 
2138 		nhi = rtnl_dereference(nh->nh_info);
2139 		if (nhi->fib_nhc.nhc_dev)
2140 			hlist_del(&nhi->dev_hash);
2141 
2142 		remove_nexthop_from_groups(net, nh, nlinfo);
2143 	}
2144 }
2145 
2146 static void remove_nexthop(struct net *net, struct nexthop *nh,
2147 			   struct nl_info *nlinfo)
2148 {
2149 	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
2150 
2151 	/* remove from the tree */
2152 	rb_erase(&nh->rb_node, &net->nexthop.rb_root);
2153 
2154 	if (nlinfo)
2155 		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
2156 
2157 	__remove_nexthop(net, nh, nlinfo);
2158 	nh_base_seq_inc(net);
2159 
2160 	nexthop_put(nh);
2161 }
2162 
2163 /* if any FIB entries reference this nexthop, any dst entries
2164  * need to be regenerated
2165  */
2166 static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
2167 			      struct nexthop *replaced_nh)
2168 {
2169 	struct fib6_info *f6i;
2170 	struct nh_group *nhg;
2171 	int i;
2172 
2173 	if (!list_empty(&nh->fi_list))
2174 		rt_cache_flush(net);
2175 
2176 	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2177 		ipv6_stub->fib6_update_sernum(net, f6i);
2178 
2179 	/* if an IPv6 group was replaced, we have to release all old
2180 	 * dsts to make sure all refcounts are released
2181 	 */
2182 	if (!replaced_nh->is_group)
2183 		return;
2184 
2185 	nhg = rtnl_dereference(replaced_nh->nh_grp);
2186 	for (i = 0; i < nhg->num_nh; i++) {
2187 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2188 		struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
2189 
2190 		if (nhi->family == AF_INET6)
2191 			ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
2192 	}
2193 }
2194 
2195 static int replace_nexthop_grp(struct net *net, struct nexthop *old,
2196 			       struct nexthop *new, const struct nh_config *cfg,
2197 			       struct netlink_ext_ack *extack)
2198 {
2199 	struct nh_res_table *tmp_table = NULL;
2200 	struct nh_res_table *new_res_table;
2201 	struct nh_res_table *old_res_table;
2202 	struct nh_group *oldg, *newg;
2203 	int i, err;
2204 
2205 	if (!new->is_group) {
2206 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
2207 		return -EINVAL;
2208 	}
2209 
2210 	oldg = rtnl_dereference(old->nh_grp);
2211 	newg = rtnl_dereference(new->nh_grp);
2212 
2213 	if (newg->hash_threshold != oldg->hash_threshold) {
2214 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
2215 		return -EINVAL;
2216 	}
2217 
2218 	if (newg->hash_threshold) {
2219 		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
2220 					     extack);
2221 		if (err)
2222 			return err;
2223 	} else if (newg->resilient) {
2224 		new_res_table = rtnl_dereference(newg->res_table);
2225 		old_res_table = rtnl_dereference(oldg->res_table);
2226 
2227 		/* Accept if num_nh_buckets was not given, but if it was
2228 		 * given, demand that the value be correct.
2229 		 */
2230 		if (cfg->nh_grp_res_has_num_buckets &&
2231 		    cfg->nh_grp_res_num_buckets !=
2232 		    old_res_table->num_nh_buckets) {
2233 			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
2234 			return -EINVAL;
2235 		}
2236 
2237 		/* Emit a pre-replace notification so that listeners can veto
2238 		 * a potentially unsupported configuration. Otherwise,
2239 		 * individual bucket replacement notifications would need to be
2240 		 * vetoed, which is something that should only happen if the
2241 		 * bucket is currently active.
2242 		 */
2243 		err = call_nexthop_res_table_notifiers(net, new, extack);
2244 		if (err)
2245 			return err;
2246 
2247 		if (cfg->nh_grp_res_has_idle_timer)
2248 			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
2249 		if (cfg->nh_grp_res_has_unbalanced_timer)
2250 			old_res_table->unbalanced_timer =
2251 				cfg->nh_grp_res_unbalanced_timer;
2252 
2253 		replace_nexthop_grp_res(oldg, newg);
2254 
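		/* The populated table stays with the group visible under the
		 * old ID; the new group's stub table is parked on oldg below
		 * (after readers quiesce) so that it is freed along with it.
		 */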
2255 		tmp_table = new_res_table;
2256 		rcu_assign_pointer(newg->res_table, old_res_table);
2257 		rcu_assign_pointer(newg->spare->res_table, old_res_table);
2258 	}
2259 
2260 	/* update parents - used by nexthop code for cleanup */
2261 	for (i = 0; i < newg->num_nh; i++)
2262 		newg->nh_entries[i].nh_parent = old;
2263 
2264 	rcu_assign_pointer(old->nh_grp, newg);
2265 
2266 	/* Make sure concurrent readers are not using 'oldg' anymore. */
2267 	synchronize_net();
2268 
2269 	if (newg->resilient) {
2270 		rcu_assign_pointer(oldg->res_table, tmp_table);
2271 		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
2272 	}
2273 
2274 	for (i = 0; i < oldg->num_nh; i++)
2275 		oldg->nh_entries[i].nh_parent = new;
2276 
2277 	rcu_assign_pointer(new->nh_grp, oldg);
2278 
2279 	return 0;
2280 }
2281 
2282 static void nh_group_v4_update(struct nh_group *nhg)
2283 {
2284 	struct nh_grp_entry *nhges;
2285 	bool has_v4 = false;
2286 	int i;
2287 
2288 	nhges = nhg->nh_entries;
2289 	for (i = 0; i < nhg->num_nh; i++) {
2290 		struct nh_info *nhi;
2291 
2292 		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2293 		if (nhi->family == AF_INET)
2294 			has_v4 = true;
2295 	}
2296 	nhg->has_v4 = has_v4;
2297 }
2298 
2299 static int replace_nexthop_single_notify_res(struct net *net,
2300 					     struct nh_res_table *res_table,
2301 					     struct nexthop *old,
2302 					     struct nh_info *oldi,
2303 					     struct nh_info *newi,
2304 					     struct netlink_ext_ack *extack)
2305 {
2306 	u32 nhg_id = res_table->nhg_id;
2307 	int err;
2308 	u16 i;
2309 
2310 	for (i = 0; i < res_table->num_nh_buckets; i++) {
2311 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2312 		struct nh_grp_entry *nhge;
2313 
2314 		nhge = rtnl_dereference(bucket->nh_entry);
2315 		if (nhge->nh == old) {
2316 			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
2317 								  i, true,
2318 								  oldi, newi,
2319 								  extack);
2320 			if (err)
2321 				goto err_notify;
2322 		}
2323 	}
2324 
2325 	return 0;
2326 
2327 err_notify:
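	/* Walk back over the buckets already notified, with oldi and newi
	 * swapped, to restore the previous state in the listeners.
	 */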
2328 	while (i-- > 0) {
2329 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2330 		struct nh_grp_entry *nhge;
2331 
2332 		nhge = rtnl_dereference(bucket->nh_entry);
2333 		if (nhge->nh == old)
2334 			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
2335 							    true, newi, oldi,
2336 							    extack);
2337 	}
2338 	return err;
2339 }
2340 
2341 static int replace_nexthop_single_notify(struct net *net,
2342 					 struct nexthop *group_nh,
2343 					 struct nexthop *old,
2344 					 struct nh_info *oldi,
2345 					 struct nh_info *newi,
2346 					 struct netlink_ext_ack *extack)
2347 {
2348 	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
2349 	struct nh_res_table *res_table;
2350 
2351 	if (nhg->hash_threshold) {
2352 		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
2353 					      group_nh, extack);
2354 	} else if (nhg->resilient) {
2355 		res_table = rtnl_dereference(nhg->res_table);
2356 		return replace_nexthop_single_notify_res(net, res_table,
2357 							 old, oldi, newi,
2358 							 extack);
2359 	}
2360 
2361 	return -EINVAL;
2362 }
2363 
2364 static int replace_nexthop_single(struct net *net, struct nexthop *old,
2365 				  struct nexthop *new,
2366 				  struct netlink_ext_ack *extack)
2367 {
2368 	u8 old_protocol, old_nh_flags;
2369 	struct nh_info *oldi, *newi;
2370 	struct nh_grp_entry *nhge;
2371 	int err;
2372 
2373 	if (new->is_group) {
2374 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
2375 		return -EINVAL;
2376 	}
2377 
2378 	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
2379 	if (err)
2380 		return err;
2381 
2382 	/* Hardware flags were set on 'old' as 'new' is not in the red-black
2383 	 * tree. Therefore, inherit the flags from 'old' to 'new'.
2384 	 */
2385 	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
2386 
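	/* Swap the nh_info payloads: 'old' keeps its slot in the ID tree
	 * but takes over the new definition, while 'new' leaves with the
	 * old one and is freed by the caller on success.
	 */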
2387 	oldi = rtnl_dereference(old->nh_info);
2388 	newi = rtnl_dereference(new->nh_info);
2389 
2390 	newi->nh_parent = old;
2391 	oldi->nh_parent = new;
2392 
2393 	old_protocol = old->protocol;
2394 	old_nh_flags = old->nh_flags;
2395 
2396 	old->protocol = new->protocol;
2397 	old->nh_flags = new->nh_flags;
2398 
2399 	rcu_assign_pointer(old->nh_info, newi);
2400 	rcu_assign_pointer(new->nh_info, oldi);
2401 
2402 	/* Send a replace notification for all the groups using the nexthop. */
2403 	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2404 		struct nexthop *nhp = nhge->nh_parent;
2405 
2406 		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
2407 						    extack);
2408 		if (err)
2409 			goto err_notify;
2410 	}
2411 
2412 	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
2413 	 * update IPv4 indication in all the groups using the nexthop.
2414 	 */
2415 	if (oldi->family == AF_INET && newi->family == AF_INET6) {
2416 		list_for_each_entry(nhge, &old->grp_list, nh_list) {
2417 			struct nexthop *nhp = nhge->nh_parent;
2418 			struct nh_group *nhg;
2419 
2420 			nhg = rtnl_dereference(nhp->nh_grp);
2421 			nh_group_v4_update(nhg);
2422 		}
2423 	}
2424 
2425 	return 0;
2426 
2427 err_notify:
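	/* Unwind the swap and re-notify, in reverse order, the groups that
	 * already saw the replacement.
	 */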
2428 	rcu_assign_pointer(new->nh_info, newi);
2429 	rcu_assign_pointer(old->nh_info, oldi);
2430 	old->nh_flags = old_nh_flags;
2431 	old->protocol = old_protocol;
2432 	oldi->nh_parent = old;
2433 	newi->nh_parent = new;
2434 	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
2435 		struct nexthop *nhp = nhge->nh_parent;
2436 
2437 		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
2438 	}
2439 	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
2440 	return err;
2441 }
2442 
2443 static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
2444 				     struct nl_info *info)
2445 {
2446 	struct fib6_info *f6i;
2447 
2448 	if (!list_empty(&nh->fi_list)) {
2449 		struct fib_info *fi;
2450 
2451 		/* The expectation is a few fib_info entries per nexthop and
2452 		 * then a lot of routes per fib_info. So mark the fib_info
2453 		 * entries and then walk the fib tables once.
2454 		 */
2455 		list_for_each_entry(fi, &nh->fi_list, nh_list)
2456 			fi->nh_updated = true;
2457 
2458 		fib_info_notify_update(net, info);
2459 
2460 		list_for_each_entry(fi, &nh->fi_list, nh_list)
2461 			fi->nh_updated = false;
2462 	}
2463 
2464 	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2465 		ipv6_stub->fib6_rt_update(net, f6i, info);
2466 }
2467 
2468 /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2469  * linked to this nexthop and for all groups that the nexthop
2470  * is a member of
2471  */
2472 static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2473 				   struct nl_info *info)
2474 {
2475 	struct nh_grp_entry *nhge;
2476 
2477 	__nexthop_replace_notify(net, nh, info);
2478 
2479 	list_for_each_entry(nhge, &nh->grp_list, nh_list)
2480 		__nexthop_replace_notify(net, nhge->nh_parent, info);
2481 }
2482 
2483 static int replace_nexthop(struct net *net, struct nexthop *old,
2484 			   struct nexthop *new, const struct nh_config *cfg,
2485 			   struct netlink_ext_ack *extack)
2486 {
2487 	bool new_is_reject = false;
2488 	struct nh_grp_entry *nhge;
2489 	int err;
2490 
2491 	/* check that existing FIB entries are ok with the
2492 	 * new nexthop definition
2493 	 */
2494 	err = fib_check_nh_list(old, new, extack);
2495 	if (err)
2496 		return err;
2497 
2498 	err = fib6_check_nh_list(old, new, extack);
2499 	if (err)
2500 		return err;
2501 
2502 	if (!new->is_group) {
2503 		struct nh_info *nhi = rtnl_dereference(new->nh_info);
2504 
2505 		new_is_reject = nhi->reject_nh;
2506 	}
2507 
2508 	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2509 		/* if new nexthop is a blackhole, any groups using this
2510 		 * nexthop cannot have more than 1 path
2511 		 */
2512 		if (new_is_reject &&
2513 		    nexthop_num_path(nhge->nh_parent) > 1) {
2514 			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2515 			return -EINVAL;
2516 		}
2517 
2518 		err = fib_check_nh_list(nhge->nh_parent, new, extack);
2519 		if (err)
2520 			return err;
2521 
2522 		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2523 		if (err)
2524 			return err;
2525 	}
2526 
2527 	if (old->is_group)
2528 		err = replace_nexthop_grp(net, old, new, cfg, extack);
2529 	else
2530 		err = replace_nexthop_single(net, old, new, extack);
2531 
2532 	if (!err) {
2533 		nh_rt_cache_flush(net, old, new);
2534 
2535 		__remove_nexthop(net, new, NULL);
2536 		nexthop_put(new);
2537 	}
2538 
2539 	return err;
2540 }
2541 
2542 /* called with rtnl_lock held */
2543 static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2544 			  struct nh_config *cfg, struct netlink_ext_ack *extack)
2545 {
2546 	struct rb_node **pp, *parent = NULL, *next;
2547 	struct rb_root *root = &net->nexthop.rb_root;
2548 	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2549 	bool create = !!(cfg->nlflags & NLM_F_CREATE);
2550 	u32 new_id = new_nh->id;
2551 	int replace_notify = 0;
2552 	int rc = -EEXIST;
2553 
2554 	pp = &root->rb_node;
2555 	while (1) {
2556 		struct nexthop *nh;
2557 
2558 		next = *pp;
2559 		if (!next)
2560 			break;
2561 
2562 		parent = next;
2563 
2564 		nh = rb_entry(parent, struct nexthop, rb_node);
2565 		if (new_id < nh->id) {
2566 			pp = &next->rb_left;
2567 		} else if (new_id > nh->id) {
2568 			pp = &next->rb_right;
2569 		} else if (replace) {
2570 			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2571 			if (!rc) {
2572 				new_nh = nh; /* send notification with old nh */
2573 				replace_notify = 1;
2574 			}
2575 			goto out;
2576 		} else {
2577 			/* id already exists and not a replace */
2578 			goto out;
2579 		}
2580 	}
2581 
2582 	if (replace && !create) {
2583 		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2584 		rc = -ENOENT;
2585 		goto out;
2586 	}
2587 
2588 	if (new_nh->is_group) {
2589 		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2590 		struct nh_res_table *res_table;
2591 
2592 		if (nhg->resilient) {
2593 			res_table = rtnl_dereference(nhg->res_table);
2594 
2595 			/* Not passing the number of buckets is OK when
2596 			 * replacing, but not when creating a new group.
2597 			 */
2598 			if (!cfg->nh_grp_res_has_num_buckets) {
2599 				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2600 				rc = -EINVAL;
2601 				goto out;
2602 			}
2603 
2604 			nh_res_group_rebalance(nhg, res_table);
2605 
2606 			/* Do not send bucket notifications, we do full
2607 			 * notification below.
2608 			 */
2609 			nh_res_table_upkeep(res_table, false, false);
2610 		}
2611 	}
2612 
2613 	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2614 	rb_insert_color(&new_nh->rb_node, root);
2615 
2616 	/* The initial insertion is a full notification for hash-threshold as
2617 	 * well as resilient groups.
2618 	 */
2619 	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2620 	if (rc)
2621 		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2622 
2623 out:
2624 	if (!rc) {
2625 		nh_base_seq_inc(net);
2626 		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
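		/* In compat mode, also regenerate RTM_NEWROUTE messages for
		 * the routes using the replaced nexthop.
		 */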
2627 		if (replace_notify &&
2628 		    READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
2629 			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2630 	}
2631 
2632 	return rc;
2633 }
2634 
2635 /* rtnl */
2636 /* remove all nexthops tied to a device being deleted */
2637 static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2638 {
2639 	unsigned int hash = nh_dev_hashfn(dev->ifindex);
2640 	struct net *net = dev_net(dev);
2641 	struct hlist_head *head = &net->nexthop.devhash[hash];
2642 	struct hlist_node *n;
2643 	struct nh_info *nhi;
2644 
2645 	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2646 		if (nhi->fib_nhc.nhc_dev != dev)
2647 			continue;
2648 
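		/* Blackhole nexthops are bound to the loopback device and
		 * only go away when it is unregistered.
		 */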
2649 		if (nhi->reject_nh &&
2650 		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2651 			continue;
2652 
2653 		remove_nexthop(net, nhi->nh_parent, NULL);
2654 	}
2655 }
2656 
2657 /* rtnl; called when net namespace is deleted */
2658 static void flush_all_nexthops(struct net *net)
2659 {
2660 	struct rb_root *root = &net->nexthop.rb_root;
2661 	struct rb_node *node;
2662 	struct nexthop *nh;
2663 
2664 	while ((node = rb_first(root))) {
2665 		nh = rb_entry(node, struct nexthop, rb_node);
2666 		remove_nexthop(net, nh, NULL);
2667 		cond_resched();
2668 	}
2669 }
2670 
2671 static struct nexthop *nexthop_create_group(struct net *net,
2672 					    struct nh_config *cfg)
2673 {
2674 	struct nlattr *grps_attr = cfg->nh_grp;
2675 	struct nexthop_grp *entry = nla_data(grps_attr);
2676 	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2677 	struct nh_group *nhg;
2678 	struct nexthop *nh;
2679 	int err;
2680 	int i;
2681 
2682 	if (WARN_ON(!num_nh))
2683 		return ERR_PTR(-EINVAL);
2684 
2685 	nh = nexthop_alloc();
2686 	if (!nh)
2687 		return ERR_PTR(-ENOMEM);
2688 
2689 	nh->is_group = 1;
2690 
2691 	nhg = nexthop_grp_alloc(num_nh);
2692 	if (!nhg) {
2693 		kfree(nh);
2694 		return ERR_PTR(-ENOMEM);
2695 	}
2696 
2697 	/* spare group for removals: rebuilt and RCU-swapped into place */
2698 	nhg->spare = nexthop_grp_alloc(num_nh);
2699 	if (!nhg->spare) {
2700 		kfree(nhg);
2701 		kfree(nh);
2702 		return ERR_PTR(-ENOMEM);
2703 	}
2704 	nhg->spare->spare = nhg;
2705 
2706 	for (i = 0; i < nhg->num_nh; ++i) {
2707 		struct nexthop *nhe;
2708 		struct nh_info *nhi;
2709 
2710 		nhe = nexthop_find_by_id(net, entry[i].id);
2711 		if (!nexthop_get(nhe)) {
2712 			err = -ENOENT;
2713 			goto out_no_nh;
2714 		}
2715 
2716 		nhi = rtnl_dereference(nhe->nh_info);
2717 		if (nhi->family == AF_INET)
2718 			nhg->has_v4 = true;
2719 
2720 		nhg->nh_entries[i].stats =
2721 			netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
2722 		if (!nhg->nh_entries[i].stats) {
2723 			err = -ENOMEM;
2724 			nexthop_put(nhe);
2725 			goto out_no_nh;
2726 		}
2727 		nhg->nh_entries[i].nh = nhe;
2728 		nhg->nh_entries[i].weight = nexthop_grp_weight(&entry[i]);
2729 
2730 		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2731 		nhg->nh_entries[i].nh_parent = nh;
2732 	}
2733 
2734 	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2735 		nhg->hash_threshold = 1;
2736 		nhg->is_multipath = true;
2737 	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2738 		struct nh_res_table *res_table;
2739 
2740 		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2741 		if (!res_table) {
2742 			err = -ENOMEM;
2743 			goto out_no_nh;
2744 		}
2745 
2746 		rcu_assign_pointer(nhg->spare->res_table, res_table);
2747 		rcu_assign_pointer(nhg->res_table, res_table);
2748 		nhg->resilient = true;
2749 		nhg->is_multipath = true;
2750 	}
2751 
2752 	WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);
2753 
2754 	if (nhg->hash_threshold)
2755 		nh_hthr_group_rebalance(nhg);
2756 
2757 	if (cfg->nh_fdb)
2758 		nhg->fdb_nh = 1;
2759 
2760 	if (cfg->nh_hw_stats)
2761 		nhg->hw_stats = true;
2762 
2763 	rcu_assign_pointer(nh->nh_grp, nhg);
2764 
2765 	return nh;
2766 
2767 out_no_nh:
2768 	for (i--; i >= 0; --i) {
2769 		list_del(&nhg->nh_entries[i].nh_list);
2770 		free_percpu(nhg->nh_entries[i].stats);
2771 		nexthop_put(nhg->nh_entries[i].nh);
2772 	}
2773 
2774 	kfree(nhg->spare);
2775 	kfree(nhg);
2776 	kfree(nh);
2777 
2778 	return ERR_PTR(err);
2779 }
2780 
2781 static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2782 			  struct nh_info *nhi, struct nh_config *cfg,
2783 			  struct netlink_ext_ack *extack)
2784 {
2785 	struct fib_nh *fib_nh = &nhi->fib_nh;
2786 	struct fib_config fib_cfg = {
2787 		.fc_oif   = cfg->nh_ifindex,
2788 		.fc_gw4   = cfg->gw.ipv4,
2789 		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2790 		.fc_flags = cfg->nh_flags,
2791 		.fc_nlinfo = cfg->nlinfo,
2792 		.fc_encap = cfg->nh_encap,
2793 		.fc_encap_type = cfg->nh_encap_type,
2794 	};
2795 	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2796 	int err;
2797 
2798 	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2799 	if (err) {
2800 		fib_nh_release(net, fib_nh);
2801 		goto out;
2802 	}
2803 
2804 	if (nhi->fdb_nh)
2805 		goto out;
2806 
2807 	/* sets nh_dev if successful */
2808 	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2809 	if (!err) {
2810 		nh->nh_flags = fib_nh->fib_nh_flags;
2811 		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2812 					  !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
2813 	} else {
2814 		fib_nh_release(net, fib_nh);
2815 	}
2816 out:
2817 	return err;
2818 }
2819 
2820 static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
2821 			  struct nh_info *nhi, struct nh_config *cfg,
2822 			  struct netlink_ext_ack *extack)
2823 {
2824 	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2825 	struct fib6_config fib6_cfg = {
2826 		.fc_table = l3mdev_fib_table(cfg->dev),
2827 		.fc_ifindex = cfg->nh_ifindex,
2828 		.fc_gateway = cfg->gw.ipv6,
2829 		.fc_flags = cfg->nh_flags,
2830 		.fc_nlinfo = cfg->nlinfo,
2831 		.fc_encap = cfg->nh_encap,
2832 		.fc_encap_type = cfg->nh_encap_type,
2833 		.fc_is_fdb = cfg->nh_fdb,
2834 	};
2835 	int err;
2836 
2837 	if (!ipv6_addr_any(&cfg->gw.ipv6))
2838 		fib6_cfg.fc_flags |= RTF_GATEWAY;
2839 
2840 	/* sets nh_dev if successful */
2841 	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2842 				      extack);
2843 	if (err) {
2844 		/* IPv6 is not enabled, don't call fib6_nh_release */
2845 		if (err == -EAFNOSUPPORT)
2846 			goto out;
2847 		ipv6_stub->fib6_nh_release(fib6_nh);
2848 	} else {
2849 		nh->nh_flags = fib6_nh->fib_nh_flags;
2850 	}
2851 out:
2852 	return err;
2853 }
2854 
2855 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2856 				      struct netlink_ext_ack *extack)
2857 {
2858 	struct nh_info *nhi;
2859 	struct nexthop *nh;
2860 	int err = 0;
2861 
2862 	nh = nexthop_alloc();
2863 	if (!nh)
2864 		return ERR_PTR(-ENOMEM);
2865 
2866 	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2867 	if (!nhi) {
2868 		kfree(nh);
2869 		return ERR_PTR(-ENOMEM);
2870 	}
2871 
2872 	nh->nh_flags = cfg->nh_flags;
2873 	nh->net = net;
2874 
2875 	nhi->nh_parent = nh;
2876 	nhi->family = cfg->nh_family;
2877 	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2878 
2879 	if (cfg->nh_fdb)
2880 		nhi->fdb_nh = 1;
2881 
2882 	if (cfg->nh_blackhole) {
2883 		nhi->reject_nh = 1;
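		/* blackhole nexthops have no real device; anchor them to
		 * loopback
		 */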
2884 		cfg->nh_ifindex = net->loopback_dev->ifindex;
2885 	}
2886 
2887 	switch (cfg->nh_family) {
2888 	case AF_INET:
2889 		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2890 		break;
2891 	case AF_INET6:
2892 		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2893 		break;
2894 	}
2895 
2896 	if (err) {
2897 		kfree(nhi);
2898 		kfree(nh);
2899 		return ERR_PTR(err);
2900 	}
2901 
2902 	/* add the entry to the device-based hash */
2903 	if (!nhi->fdb_nh)
2904 		nexthop_devhash_add(net, nhi);
2905 
2906 	rcu_assign_pointer(nh->nh_info, nhi);
2907 
2908 	return nh;
2909 }
2910 
2911 /* called with rtnl lock held */
2912 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2913 				   struct netlink_ext_ack *extack)
2914 {
2915 	struct nexthop *nh;
2916 	int err;
2917 
2918 	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
2919 		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
2920 		return ERR_PTR(-EINVAL);
2921 	}
2922 
2923 	if (!cfg->nh_id) {
2924 		cfg->nh_id = nh_find_unused_id(net);
2925 		if (!cfg->nh_id) {
2926 			NL_SET_ERR_MSG(extack, "No unused id");
2927 			return ERR_PTR(-EINVAL);
2928 		}
2929 	}
2930 
2931 	if (cfg->nh_grp)
2932 		nh = nexthop_create_group(net, cfg);
2933 	else
2934 		nh = nexthop_create(net, cfg, extack);
2935 
2936 	if (IS_ERR(nh))
2937 		return nh;
2938 
2939 	refcount_set(&nh->refcnt, 1);
2940 	nh->id = cfg->nh_id;
2941 	nh->protocol = cfg->nh_protocol;
2942 	nh->net = net;
2943 
2944 	err = insert_nexthop(net, nh, cfg, extack);
2945 	if (err) {
2946 		__remove_nexthop(net, nh, NULL);
2947 		nexthop_put(nh);
2948 		nh = ERR_PTR(err);
2949 	}
2950 
2951 	return nh;
2952 }
2953 
2954 static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
2955 			    unsigned long *timer_p, bool *has_p,
2956 			    struct netlink_ext_ack *extack)
2957 {
2958 	unsigned long timer;
2959 	u32 value;
2960 
2961 	if (!attr) {
2962 		*timer_p = fallback;
2963 		*has_p = false;
2964 		return 0;
2965 	}
2966 
2967 	value = nla_get_u32(attr);
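	/* The timer is passed in clock_t (USER_HZ) units;
	 * clock_t_to_jiffies() saturates to ~0UL on overflow.
	 */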
2968 	timer = clock_t_to_jiffies(value);
2969 	if (timer == ~0UL) {
2970 		NL_SET_ERR_MSG(extack, "Timer value too large");
2971 		return -EINVAL;
2972 	}
2973 
2974 	*timer_p = timer;
2975 	*has_p = true;
2976 	return 0;
2977 }
2978 
2979 static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
2980 				    struct netlink_ext_ack *extack)
2981 {
2982 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
2983 	int err;
2984 
2985 	if (res) {
2986 		err = nla_parse_nested(tb,
2987 				       ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
2988 				       res, rtm_nh_res_policy_new, extack);
2989 		if (err < 0)
2990 			return err;
2991 	}
2992 
2993 	if (tb[NHA_RES_GROUP_BUCKETS]) {
2994 		cfg->nh_grp_res_num_buckets =
2995 			nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
2996 		cfg->nh_grp_res_has_num_buckets = true;
2997 		if (!cfg->nh_grp_res_num_buckets) {
2998 			NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
2999 			return -EINVAL;
3000 		}
3001 	}
3002 
3003 	err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
3004 			       NH_RES_DEFAULT_IDLE_TIMER,
3005 			       &cfg->nh_grp_res_idle_timer,
3006 			       &cfg->nh_grp_res_has_idle_timer,
3007 			       extack);
3008 	if (err)
3009 		return err;
3010 
3011 	return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
3012 				NH_RES_DEFAULT_UNBALANCED_TIMER,
3013 				&cfg->nh_grp_res_unbalanced_timer,
3014 				&cfg->nh_grp_res_has_unbalanced_timer,
3015 				extack);
3016 }
3017 
3018 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
3019 			    struct nlmsghdr *nlh, struct nh_config *cfg,
3020 			    struct netlink_ext_ack *extack)
3021 {
3022 	struct nhmsg *nhm = nlmsg_data(nlh);
3023 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
3024 	int err;
3025 
3026 	err = nlmsg_parse(nlh, sizeof(*nhm), tb,
3027 			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
3028 			  rtm_nh_policy_new, extack);
3029 	if (err < 0)
3030 		return err;
3031 
3032 	err = -EINVAL;
3033 	if (nhm->resvd || nhm->nh_scope) {
3034 		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
3035 		goto out;
3036 	}
3037 	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
3038 		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
3039 		goto out;
3040 	}
3041 
3042 	switch (nhm->nh_family) {
3043 	case AF_INET:
3044 	case AF_INET6:
3045 		break;
3046 	case AF_UNSPEC:
3047 		if (tb[NHA_GROUP])
3048 			break;
3049 		fallthrough;
3050 	default:
3051 		NL_SET_ERR_MSG(extack, "Invalid address family");
3052 		goto out;
3053 	}
3054 
3055 	memset(cfg, 0, sizeof(*cfg));
3056 	cfg->nlflags = nlh->nlmsg_flags;
3057 	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
3058 	cfg->nlinfo.nlh = nlh;
3059 	cfg->nlinfo.nl_net = net;
3060 
3061 	cfg->nh_family = nhm->nh_family;
3062 	cfg->nh_protocol = nhm->nh_protocol;
3063 	cfg->nh_flags = nhm->nh_flags;
3064 
3065 	if (tb[NHA_ID])
3066 		cfg->nh_id = nla_get_u32(tb[NHA_ID]);
3067 
3068 	if (tb[NHA_FDB]) {
3069 		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
3070 		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
3071 			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
3072 			goto out;
3073 		}
3074 		if (nhm->nh_flags) {
3075 			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
3076 			goto out;
3077 		}
3078 		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
3079 	}
3080 
3081 	if (tb[NHA_GROUP]) {
3082 		if (nhm->nh_family != AF_UNSPEC) {
3083 			NL_SET_ERR_MSG(extack, "Invalid family for group");
3084 			goto out;
3085 		}
3086 		cfg->nh_grp = tb[NHA_GROUP];
3087 
3088 		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
3089 		if (tb[NHA_GROUP_TYPE])
3090 			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
3091 
3092 		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
3093 			NL_SET_ERR_MSG(extack, "Invalid group type");
3094 			goto out;
3095 		}
3096 		err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
3097 					  cfg->nh_grp_type, extack);
3098 		if (err)
3099 			goto out;
3100 
3101 		if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
3102 			err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
3103 						       cfg, extack);
3104 
3105 		if (tb[NHA_HW_STATS_ENABLE])
3106 			cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
3107 
3108 		/* no other attributes should be set */
3109 		goto out;
3110 	}
3111 
3112 	if (tb[NHA_BLACKHOLE]) {
3113 		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
3114 		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
3115 			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
3116 			goto out;
3117 		}
3118 
3119 		cfg->nh_blackhole = 1;
3120 		err = 0;
3121 		goto out;
3122 	}
3123 
3124 	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
3125 		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
3126 		goto out;
3127 	}
3128 
3129 	if (!cfg->nh_fdb && tb[NHA_OIF]) {
3130 		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
3131 		if (cfg->nh_ifindex)
3132 			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
3133 
3134 		if (!cfg->dev) {
3135 			NL_SET_ERR_MSG(extack, "Invalid device index");
3136 			goto out;
3137 		} else if (!(cfg->dev->flags & IFF_UP)) {
3138 			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3139 			err = -ENETDOWN;
3140 			goto out;
3141 		} else if (!netif_carrier_ok(cfg->dev)) {
3142 			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
3143 			err = -ENETDOWN;
3144 			goto out;
3145 		}
3146 	}
3147 
3148 	err = -EINVAL;
3149 	if (tb[NHA_GATEWAY]) {
3150 		struct nlattr *gwa = tb[NHA_GATEWAY];
3151 
3152 		switch (cfg->nh_family) {
3153 		case AF_INET:
3154 			if (nla_len(gwa) != sizeof(u32)) {
3155 				NL_SET_ERR_MSG(extack, "Invalid gateway");
3156 				goto out;
3157 			}
3158 			cfg->gw.ipv4 = nla_get_be32(gwa);
3159 			break;
3160 		case AF_INET6:
3161 			if (nla_len(gwa) != sizeof(struct in6_addr)) {
3162 				NL_SET_ERR_MSG(extack, "Invalid gateway");
3163 				goto out;
3164 			}
3165 			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
3166 			break;
3167 		default:
3168 			NL_SET_ERR_MSG(extack,
3169 				       "Unknown address family for gateway");
3170 			goto out;
3171 		}
3172 	} else {
3173 		/* device-only nexthop (no gateway) */
3174 		if (cfg->nh_flags & RTNH_F_ONLINK) {
3175 			NL_SET_ERR_MSG(extack,
3176 				       "ONLINK flag can not be set for nexthop without a gateway");
3177 			goto out;
3178 		}
3179 	}
3180 
3181 	if (tb[NHA_ENCAP]) {
3182 		cfg->nh_encap = tb[NHA_ENCAP];
3183 
3184 		if (!tb[NHA_ENCAP_TYPE]) {
3185 			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
3186 			goto out;
3187 		}
3188 
3189 		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
3190 		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
3191 		if (err < 0)
3192 			goto out;
3193 
3194 	} else if (tb[NHA_ENCAP_TYPE]) {
3195 		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
3196 		goto out;
3197 	}
3198 
3199 	if (tb[NHA_HW_STATS_ENABLE]) {
3200 		NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
3201 		goto out;
3202 	}
3203 
3204 	err = 0;
3205 out:
3206 	return err;
3207 }
3208 
3209 /* rtnl */
3210 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3211 			   struct netlink_ext_ack *extack)
3212 {
3213 	struct net *net = sock_net(skb->sk);
3214 	struct nh_config cfg;
3215 	struct nexthop *nh;
3216 	int err;
3217 
3218 	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
3219 	if (!err) {
3220 		nh = nexthop_add(net, &cfg, extack);
3221 		if (IS_ERR(nh))
3222 			err = PTR_ERR(nh);
3223 	}
3224 
3225 	return err;
3226 }
3227 
3228 static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
3229 				struct nlattr **tb, u32 *id, u32 *op_flags,
3230 				struct netlink_ext_ack *extack)
3231 {
3232 	struct nhmsg *nhm = nlmsg_data(nlh);
3233 
3234 	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3235 		NL_SET_ERR_MSG(extack, "Invalid values in header");
3236 		return -EINVAL;
3237 	}
3238 
3239 	if (!tb[NHA_ID]) {
3240 		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
3241 		return -EINVAL;
3242 	}
3243 
3244 	*id = nla_get_u32(tb[NHA_ID]);
3245 	if (!(*id)) {
3246 		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3247 		return -EINVAL;
3248 	}
3249 
3250 	if (op_flags) {
3251 		if (tb[NHA_OP_FLAGS])
3252 			*op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
3253 		else
3254 			*op_flags = 0;
3255 	}
3256 
3257 	return 0;
3258 }
3259 
3260 /* rtnl */
3261 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3262 			   struct netlink_ext_ack *extack)
3263 {
3264 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
3265 	struct net *net = sock_net(skb->sk);
3266 	struct nl_info nlinfo = {
3267 		.nlh = nlh,
3268 		.nl_net = net,
3269 		.portid = NETLINK_CB(skb).portid,
3270 	};
3271 	struct nexthop *nh;
3272 	int err;
3273 	u32 id;
3274 
3275 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3276 			  ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
3277 			  extack);
3278 	if (err < 0)
3279 		return err;
3280 
3281 	err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
3282 	if (err)
3283 		return err;
3284 
3285 	nh = nexthop_find_by_id(net, id);
3286 	if (!nh)
3287 		return -ENOENT;
3288 
3289 	remove_nexthop(net, nh, &nlinfo);
3290 
3291 	return 0;
3292 }
3293 
3294 /* rtnl */
3295 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3296 			   struct netlink_ext_ack *extack)
3297 {
3298 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
3299 	struct net *net = sock_net(in_skb->sk);
3300 	struct sk_buff *skb = NULL;
3301 	struct nexthop *nh;
3302 	u32 op_flags;
3303 	int err;
3304 	u32 id;
3305 
3306 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3307 			  ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
3308 			  extack);
3309 	if (err < 0)
3310 		return err;
3311 
3312 	err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
3313 	if (err)
3314 		return err;
3315 
3316 	err = -ENOBUFS;
3317 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3318 	if (!skb)
3319 		goto out;
3320 
3321 	err = -ENOENT;
3322 	nh = nexthop_find_by_id(net, id);
3323 	if (!nh)
3324 		goto errout_free;
3325 
3326 	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
3327 			   nlh->nlmsg_seq, 0, op_flags);
3328 	if (err < 0) {
3329 		WARN_ON(err == -EMSGSIZE);
3330 		goto errout_free;
3331 	}
3332 
3333 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3334 out:
3335 	return err;
3336 errout_free:
3337 	kfree_skb(skb);
3338 	goto out;
3339 }
3340 
3341 struct nh_dump_filter {
3342 	u32 nh_id;
3343 	int dev_idx;
3344 	int master_idx;
3345 	bool group_filter;
3346 	bool fdb_filter;
3347 	u32 res_bucket_nh_id;
3348 	u32 op_flags;
3349 };
3350 
3351 static bool nh_dump_filtered(struct nexthop *nh,
3352 			     struct nh_dump_filter *filter, u8 family)
3353 {
3354 	const struct net_device *dev;
3355 	const struct nh_info *nhi;
3356 
3357 	if (filter->group_filter && !nh->is_group)
3358 		return true;
3359 
3360 	if (!filter->dev_idx && !filter->master_idx && !family)
3361 		return false;
3362 
3363 	if (nh->is_group)
3364 		return true;
3365 
3366 	nhi = rtnl_dereference(nh->nh_info);
3367 	if (family && nhi->family != family)
3368 		return true;
3369 
3370 	dev = nhi->fib_nhc.nhc_dev;
3371 	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
3372 		return true;
3373 
3374 	if (filter->master_idx) {
3375 		struct net_device *master;
3376 
3377 		if (!dev)
3378 			return true;
3379 
3380 		master = netdev_master_upper_dev_get((struct net_device *)dev);
3381 		if (!master || master->ifindex != filter->master_idx)
3382 			return true;
3383 	}
3384 
3385 	return false;
3386 }
3387 
3388 static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
3389 			       struct nh_dump_filter *filter,
3390 			       struct netlink_ext_ack *extack)
3391 {
3392 	struct nhmsg *nhm;
3393 	u32 idx;
3394 
3395 	if (tb[NHA_OIF]) {
3396 		idx = nla_get_u32(tb[NHA_OIF]);
3397 		if (idx > INT_MAX) {
3398 			NL_SET_ERR_MSG(extack, "Invalid device index");
3399 			return -EINVAL;
3400 		}
3401 		filter->dev_idx = idx;
3402 	}
3403 	if (tb[NHA_MASTER]) {
3404 		idx = nla_get_u32(tb[NHA_MASTER]);
3405 		if (idx > INT_MAX) {
3406 			NL_SET_ERR_MSG(extack, "Invalid master device index");
3407 			return -EINVAL;
3408 		}
3409 		filter->master_idx = idx;
3410 	}
3411 	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
3412 	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
3413 
3414 	nhm = nlmsg_data(nlh);
3415 	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3416 		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
3417 		return -EINVAL;
3418 	}
3419 
3420 	return 0;
3421 }
3422 
3423 static int nh_valid_dump_req(const struct nlmsghdr *nlh,
3424 			     struct nh_dump_filter *filter,
3425 			     struct netlink_callback *cb)
3426 {
3427 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
3428 	int err;
3429 
3430 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3431 			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
3432 			  rtm_nh_policy_dump, cb->extack);
3433 	if (err < 0)
3434 		return err;
3435 
3436 	if (tb[NHA_OP_FLAGS])
3437 		filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
3438 	else
3439 		filter->op_flags = 0;
3440 
3441 	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3442 }
3443 
3444 struct rtm_dump_nh_ctx {
3445 	u32 idx;
3446 };
3447 
3448 static struct rtm_dump_nh_ctx *
3449 rtm_dump_nh_ctx(struct netlink_callback *cb)
3450 {
3451 	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
3452 
3453 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3454 	return ctx;
3455 }
3456 
3457 static int rtm_dump_walk_nexthops(struct sk_buff *skb,
3458 				  struct netlink_callback *cb,
3459 				  struct rb_root *root,
3460 				  struct rtm_dump_nh_ctx *ctx,
3461 				  int (*nh_cb)(struct sk_buff *skb,
3462 					       struct netlink_callback *cb,
3463 					       struct nexthop *nh, void *data),
3464 				  void *data)
3465 {
3466 	struct rb_node *node;
3467 	int s_idx;
3468 	int err;
3469 
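	/* The walk context survives between dump calls; resume at the last
	 * recorded ID, inclusive, so an entry that did not fit is retried.
	 */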
3470 	s_idx = ctx->idx;
3471 	for (node = rb_first(root); node; node = rb_next(node)) {
3472 		struct nexthop *nh;
3473 
3474 		nh = rb_entry(node, struct nexthop, rb_node);
3475 		if (nh->id < s_idx)
3476 			continue;
3477 
3478 		ctx->idx = nh->id;
3479 		err = nh_cb(skb, cb, nh, data);
3480 		if (err)
3481 			return err;
3482 	}
3483 
3484 	return 0;
3485 }
3486 
3487 static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
3488 			       struct nexthop *nh, void *data)
3489 {
3490 	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3491 	struct nh_dump_filter *filter = data;
3492 
3493 	if (nh_dump_filtered(nh, filter, nhm->nh_family))
3494 		return 0;
3495 
3496 	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
3497 			    NETLINK_CB(cb->skb).portid,
3498 			    cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
3499 }
3500 
3501 /* rtnl */
3502 static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
3503 {
3504 	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
3505 	struct net *net = sock_net(skb->sk);
3506 	struct rb_root *root = &net->nexthop.rb_root;
3507 	struct nh_dump_filter filter = {};
3508 	int err;
3509 
3510 	err = nh_valid_dump_req(cb->nlh, &filter, cb);
3511 	if (err < 0)
3512 		return err;
3513 
3514 	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
3515 				     &rtm_dump_nexthop_cb, &filter);
3516 
3517 	cb->seq = net->nexthop.seq;
3518 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3519 	return err;
3520 }
3521 
3522 static struct nexthop *
3523 nexthop_find_group_resilient(struct net *net, u32 id,
3524 			     struct netlink_ext_ack *extack)
3525 {
3526 	struct nh_group *nhg;
3527 	struct nexthop *nh;
3528 
3529 	nh = nexthop_find_by_id(net, id);
3530 	if (!nh)
3531 		return ERR_PTR(-ENOENT);
3532 
3533 	if (!nh->is_group) {
3534 		NL_SET_ERR_MSG(extack, "Not a nexthop group");
3535 		return ERR_PTR(-EINVAL);
3536 	}
3537 
3538 	nhg = rtnl_dereference(nh->nh_grp);
3539 	if (!nhg->resilient) {
3540 		NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
3541 		return ERR_PTR(-EINVAL);
3542 	}
3543 
3544 	return nh;
3545 }
3546 
3547 static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
3548 			      struct netlink_ext_ack *extack)
3549 {
3550 	u32 idx;
3551 
3552 	if (attr) {
3553 		idx = nla_get_u32(attr);
3554 		if (!idx) {
3555 			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3556 			return -EINVAL;
3557 		}
3558 		*nh_id_p = idx;
3559 	} else {
3560 		*nh_id_p = 0;
3561 	}
3562 
3563 	return 0;
3564 }
3565 
3566 static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
3567 				    struct nh_dump_filter *filter,
3568 				    struct netlink_callback *cb)
3569 {
3570 	struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
3571 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
3572 	int err;
3573 
3574 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3575 			  ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
3576 			  rtm_nh_policy_dump_bucket, NULL);
3577 	if (err < 0)
3578 		return err;
3579 
3580 	err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
3581 	if (err)
3582 		return err;
3583 
3584 	if (tb[NHA_RES_BUCKET]) {
3585 		size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;
3586 
3587 		err = nla_parse_nested(res_tb, max,
3588 				       tb[NHA_RES_BUCKET],
3589 				       rtm_nh_res_bucket_policy_dump,
3590 				       cb->extack);
3591 		if (err < 0)
3592 			return err;
3593 
3594 		err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
3595 					 &filter->res_bucket_nh_id,
3596 					 cb->extack);
3597 		if (err)
3598 			return err;
3599 	}
3600 
3601 	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3602 }
3603 
3604 struct rtm_dump_res_bucket_ctx {
3605 	struct rtm_dump_nh_ctx nh;
3606 	u16 bucket_index;
3607 };
3608 
3609 static struct rtm_dump_res_bucket_ctx *
3610 rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
3611 {
3612 	struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
3613 
3614 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3615 	return ctx;
3616 }
3617 
3618 struct rtm_dump_nexthop_bucket_data {
3619 	struct rtm_dump_res_bucket_ctx *ctx;
3620 	struct nh_dump_filter filter;
3621 };
3622 
3623 static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
3624 				      struct netlink_callback *cb,
3625 				      struct nexthop *nh,
3626 				      struct rtm_dump_nexthop_bucket_data *dd)
3627 {
3628 	u32 portid = NETLINK_CB(cb->skb).portid;
3629 	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3630 	struct nh_res_table *res_table;
3631 	struct nh_group *nhg;
3632 	u16 bucket_index;
3633 	int err;
3634 
3635 	nhg = rtnl_dereference(nh->nh_grp);
3636 	res_table = rtnl_dereference(nhg->res_table);
3637 	for (bucket_index = dd->ctx->bucket_index;
3638 	     bucket_index < res_table->num_nh_buckets;
3639 	     bucket_index++) {
3640 		struct nh_res_bucket *bucket;
3641 		struct nh_grp_entry *nhge;
3642 
3643 		bucket = &res_table->nh_buckets[bucket_index];
3644 		nhge = rtnl_dereference(bucket->nh_entry);
3645 		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
3646 			continue;
3647 
3648 		if (dd->filter.res_bucket_nh_id &&
3649 		    dd->filter.res_bucket_nh_id != nhge->nh->id)
3650 			continue;
3651 
3652 		dd->ctx->bucket_index = bucket_index;
3653 		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
3654 					 RTM_NEWNEXTHOPBUCKET, portid,
3655 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3656 					 cb->extack);
3657 		if (err)
3658 			return err;
3659 	}
3660 
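	/* All buckets of this group dumped; reset the index for the next
	 * group.
	 */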
3661 	dd->ctx->bucket_index = 0;
3662 
3663 	return 0;
3664 }
3665 
3666 static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
3667 				      struct netlink_callback *cb,
3668 				      struct nexthop *nh, void *data)
3669 {
3670 	struct rtm_dump_nexthop_bucket_data *dd = data;
3671 	struct nh_group *nhg;
3672 
3673 	if (!nh->is_group)
3674 		return 0;
3675 
3676 	nhg = rtnl_dereference(nh->nh_grp);
3677 	if (!nhg->resilient)
3678 		return 0;
3679 
3680 	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
3681 }
3682 
3683 /* rtnl */
3684 static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
3685 				   struct netlink_callback *cb)
3686 {
3687 	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
3688 	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
3689 	struct net *net = sock_net(skb->sk);
3690 	struct nexthop *nh;
3691 	int err;
3692 
3693 	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
3694 	if (err)
3695 		return err;
3696 
3697 	if (dd.filter.nh_id) {
3698 		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
3699 						  cb->extack);
3700 		if (IS_ERR(nh))
3701 			return PTR_ERR(nh);
3702 		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
3703 	} else {
3704 		struct rb_root *root = &net->nexthop.rb_root;
3705 
3706 		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
3707 					     &rtm_dump_nexthop_bucket_cb, &dd);
3708 	}
3709 
3710 	cb->seq = net->nexthop.seq;
3711 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3712 	return err;
3713 }
3714 
3715 static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
3716 					      u16 *bucket_index,
3717 					      struct netlink_ext_ack *extack)
3718 {
3719 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
3720 	int err;
3721 
3722 	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
3723 			       res, rtm_nh_res_bucket_policy_get, extack);
3724 	if (err < 0)
3725 		return err;
3726 
3727 	if (!tb[NHA_RES_BUCKET_INDEX]) {
3728 		NL_SET_ERR_MSG(extack, "Bucket index is missing");
3729 		return -EINVAL;
3730 	}
3731 
3732 	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
3733 	return 0;
3734 }
3735 
3736 static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
3737 				   u32 *id, u16 *bucket_index,
3738 				   struct netlink_ext_ack *extack)
3739 {
3740 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
3741 	int err;
3742 
3743 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3744 			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
3745 			  rtm_nh_policy_get_bucket, extack);
3746 	if (err < 0)
3747 		return err;
3748 
3749 	err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
3750 	if (err)
3751 		return err;
3752 
3753 	if (!tb[NHA_RES_BUCKET]) {
3754 		NL_SET_ERR_MSG(extack, "Bucket information is missing");
3755 		return -EINVAL;
3756 	}
3757 
3758 	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
3759 						 bucket_index, extack);
3760 	if (err)
3761 		return err;
3762 
3763 	return 0;
3764 }
3765 
3766 /* rtnl */
3767 static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3768 				  struct netlink_ext_ack *extack)
3769 {
3770 	struct net *net = sock_net(in_skb->sk);
3771 	struct nh_res_table *res_table;
3772 	struct sk_buff *skb = NULL;
3773 	struct nh_group *nhg;
3774 	struct nexthop *nh;
3775 	u16 bucket_index;
3776 	int err;
3777 	u32 id;
3778 
3779 	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
3780 	if (err)
3781 		return err;
3782 
3783 	nh = nexthop_find_group_resilient(net, id, extack);
3784 	if (IS_ERR(nh))
3785 		return PTR_ERR(nh);
3786 
3787 	nhg = rtnl_dereference(nh->nh_grp);
3788 	res_table = rtnl_dereference(nhg->res_table);
3789 	if (bucket_index >= res_table->num_nh_buckets) {
3790 		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
3791 		return -ENOENT;
3792 	}
3793 
3794 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3795 	if (!skb)
3796 		return -ENOBUFS;
3797 
3798 	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
3799 				 bucket_index, RTM_NEWNEXTHOPBUCKET,
3800 				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
3801 				 0, extack);
3802 	if (err < 0) {
3803 		WARN_ON(err == -EMSGSIZE);
3804 		goto errout_free;
3805 	}
3806 
3807 	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3808 
3809 errout_free:
3810 	kfree_skb(skb);
3811 	return err;
3812 }
3813 
3814 static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
3815 {
3816 	unsigned int hash = nh_dev_hashfn(dev->ifindex);
3817 	struct net *net = dev_net(dev);
3818 	struct hlist_head *head = &net->nexthop.devhash[hash];
3819 	struct hlist_node *n;
3820 	struct nh_info *nhi;
3821 
3822 	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
3823 		if (nhi->fib_nhc.nhc_dev == dev) {
3824 			if (nhi->family == AF_INET)
3825 				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
3826 						   orig_mtu);
3827 		}
3828 	}
3829 }
3830 
3831 /* rtnl */
3832 static int nh_netdev_event(struct notifier_block *this,
3833 			   unsigned long event, void *ptr)
3834 {
3835 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3836 	struct netdev_notifier_info_ext *info_ext;
3837 
3838 	switch (event) {
3839 	case NETDEV_DOWN:
3840 	case NETDEV_UNREGISTER:
3841 		nexthop_flush_dev(dev, event);
3842 		break;
3843 	case NETDEV_CHANGE:
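		/* Flush only if the device lost both RUNNING and LOWER_UP. */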
3844 		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
3845 			nexthop_flush_dev(dev, event);
3846 		break;
3847 	case NETDEV_CHANGEMTU:
3848 		info_ext = ptr;
3849 		nexthop_sync_mtu(dev, info_ext->ext.mtu);
3850 		rt_cache_flush(dev_net(dev));
3851 		break;
3852 	}
3853 	return NOTIFY_DONE;
3854 }
3855 
3856 static struct notifier_block nh_netdev_notifier = {
3857 	.notifier_call = nh_netdev_event,
3858 };
3859 
3860 static int nexthops_dump(struct net *net, struct notifier_block *nb,
3861 			 enum nexthop_event_type event_type,
3862 			 struct netlink_ext_ack *extack)
3863 {
3864 	struct rb_root *root = &net->nexthop.rb_root;
3865 	struct rb_node *node;
3866 	int err = 0;
3867 
3868 	for (node = rb_first(root); node; node = rb_next(node)) {
3869 		struct nexthop *nh;
3870 
3871 		nh = rb_entry(node, struct nexthop, rb_node);
3872 		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
3873 		if (err)
3874 			break;
3875 	}
3876 
3877 	return err;
3878 }
3879 
3880 int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
3881 			      struct netlink_ext_ack *extack)
3882 {
3883 	int err;
3884 
3885 	rtnl_lock();
3886 	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
3887 	if (err)
3888 		goto unlock;
3889 	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
3890 					       nb);
3891 unlock:
3892 	rtnl_unlock();
3893 	return err;
3894 }
3895 EXPORT_SYMBOL(register_nexthop_notifier);
3896 
3897 int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3898 {
3899 	int err;
3900 
3901 	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
3902 						 nb);
3903 	if (!err)
3904 		nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
3905 	return err;
3906 }
3907 EXPORT_SYMBOL(__unregister_nexthop_notifier);
3908 
3909 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3910 {
3911 	int err;
3912 
3913 	rtnl_lock();
3914 	err = __unregister_nexthop_notifier(net, nb);
3915 	rtnl_unlock();
3916 	return err;
3917 }
3918 EXPORT_SYMBOL(unregister_nexthop_notifier);
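
/* A minimal consumer sketch (hypothetical driver code; the my_* names are
 * illustrative).  The chain is blocking, so the callback may sleep, and
 * errors are propagated back with notifier_from_errno():
 *
 *	static int my_nh_event(struct notifier_block *nb,
 *			       unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		switch (event) {
 *		case NEXTHOP_EVENT_REPLACE:
 *			return notifier_from_errno(my_program_nh(info));
 *		case NEXTHOP_EVENT_DEL:
 *			my_unprogram_nh(info);
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block my_nh_nb = {
 *		.notifier_call = my_nh_event,
 *	};
 *
 *	err = register_nexthop_notifier(net, &my_nh_nb, extack);
 *	...
 *	unregister_nexthop_notifier(net, &my_nh_nb);
 */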
3919 
3920 void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
3921 {
3922 	struct nexthop *nexthop;
3923 
3924 	rcu_read_lock();
3925 
3926 	nexthop = nexthop_find_by_id(net, id);
3927 	if (!nexthop)
3928 		goto out;
3929 
3930 	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3931 	if (offload)
3932 		nexthop->nh_flags |= RTNH_F_OFFLOAD;
3933 	if (trap)
3934 		nexthop->nh_flags |= RTNH_F_TRAP;
3935 
3936 out:
3937 	rcu_read_unlock();
3938 }
3939 EXPORT_SYMBOL(nexthop_set_hw_flags);
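
/* Typical driver usage (sketch): once nexthop @id has been installed in
 * hardware, report it as offloaded and not trapping to the CPU:
 *
 *	nexthop_set_hw_flags(net, id, true, false);
 */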
3940 
3941 void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
3942 				 bool offload, bool trap)
3943 {
3944 	struct nh_res_table *res_table;
3945 	struct nh_res_bucket *bucket;
3946 	struct nexthop *nexthop;
3947 	struct nh_group *nhg;
3948 
3949 	rcu_read_lock();
3950 
3951 	nexthop = nexthop_find_by_id(net, id);
3952 	if (!nexthop || !nexthop->is_group)
3953 		goto out;
3954 
3955 	nhg = rcu_dereference(nexthop->nh_grp);
3956 	if (!nhg->resilient)
3957 		goto out;
3958 
3959 	if (bucket_index >= nhg->res_table->num_nh_buckets)
3960 		goto out;
3961 
3962 	res_table = rcu_dereference(nhg->res_table);
3963 	bucket = &res_table->nh_buckets[bucket_index];
3964 	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3965 	if (offload)
3966 		bucket->nh_flags |= RTNH_F_OFFLOAD;
3967 	if (trap)
3968 		bucket->nh_flags |= RTNH_F_TRAP;
3969 
3970 out:
3971 	rcu_read_unlock();
3972 }
3973 EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
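
/* As above, but for one bucket of a resilient group (sketch; grp_id and
 * bucket_index are whatever the driver programmed):
 *
 *	nexthop_bucket_set_hw_flags(net, grp_id, bucket_index, true, false);
 */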
3974 
3975 void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
3976 				     unsigned long *activity)
3977 {
3978 	struct nh_res_table *res_table;
3979 	struct nexthop *nexthop;
3980 	struct nh_group *nhg;
3981 	u16 i;
3982 
3983 	rcu_read_lock();
3984 
3985 	nexthop = nexthop_find_by_id(net, id);
3986 	if (!nexthop || !nexthop->is_group)
3987 		goto out;
3988 
3989 	nhg = rcu_dereference(nexthop->nh_grp);
3990 	if (!nhg->resilient)
3991 		goto out;
3992 
3993 	/* Instead of silently ignoring some buckets, demand that the sizes
3994 	 * be the same.
3995 	 */
3996 	res_table = rcu_dereference(nhg->res_table);
3997 	if (num_buckets != res_table->num_nh_buckets)
3998 		goto out;
3999 
4000 	for (i = 0; i < num_buckets; i++) {
4001 		if (test_bit(i, activity))
4002 			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
4003 	}
4004 
4005 out:
4006 	rcu_read_unlock();
4007 }
4008 EXPORT_SYMBOL(nexthop_res_grp_activity_update);
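
/* Expected caller shape (hypothetical driver code; my_hw_bucket_active()
 * is illustrative): gather per-bucket activity from hardware into a
 * bitmap of exactly num_nh_buckets bits, then report it:
 *
 *	unsigned long *activity;
 *	u16 i;
 *
 *	activity = bitmap_zalloc(num_buckets, GFP_KERNEL);
 *	if (!activity)
 *		return;
 *	for (i = 0; i < num_buckets; i++)
 *		if (my_hw_bucket_active(hw, grp_id, i))
 *			__set_bit(i, activity);
 *	nexthop_res_grp_activity_update(net, grp_id, num_buckets, activity);
 *	bitmap_free(activity);
 */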
4009 
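/* Netns teardown: the batched exit below runs with RTNL already held and
 * flushes every nexthop in each dying namespace; the device hash table is
 * freed afterwards in nexthop_net_exit().
 */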
4010 static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
4011 						   struct list_head *dev_to_kill)
4012 {
4013 	struct net *net;
4014 
4015 	ASSERT_RTNL();
4016 	list_for_each_entry(net, net_list, exit_list)
4017 		flush_all_nexthops(net);
4018 }
4019 
4020 static void __net_exit nexthop_net_exit(struct net *net)
4021 {
4022 	kfree(net->nexthop.devhash);
4023 	net->nexthop.devhash = NULL;
4024 }
4025 
4026 static int __net_init nexthop_net_init(struct net *net)
4027 {
4028 	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;
4029 
4030 	net->nexthop.rb_root = RB_ROOT;
4031 	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
4032 	if (!net->nexthop.devhash)
4033 		return -ENOMEM;
4034 	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
4035 
4036 	return 0;
4037 }
4038 
4039 static struct pernet_operations nexthop_net_ops = {
4040 	.init = nexthop_net_init,
4041 	.exit = nexthop_net_exit,
4042 	.exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
4043 };
4044 
4045 static int __init nexthop_init(void)
4046 {
4047 	register_pernet_subsys(&nexthop_net_ops);
4048 
4049 	register_netdevice_notifier(&nh_netdev_notifier);
4050 
4051 	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4052 	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
4053 	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
4054 		      rtm_dump_nexthop, 0);
4055 
4056 	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4057 	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
4058 
4059 	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4060 	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
4061 
4062 	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
4063 		      rtm_dump_nexthop_bucket, 0);
4064 
4065 	return 0;
4066 }
4067 subsys_initcall(nexthop_init);
4068