xref: /linux/net/ipv4/nexthop.c (revision cd80e7ee47d2fd5c97563c003ff31ce8240ca2d8)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Generic nexthop implementation
3  *
4  * Copyright (c) 2017-19 Cumulus Networks
5  * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
6  */
7 
8 #include <linux/nexthop.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
12 #include <net/arp.h>
13 #include <net/ipv6_stubs.h>
14 #include <net/lwtunnel.h>
15 #include <net/ndisc.h>
16 #include <net/nexthop.h>
17 #include <net/route.h>
18 #include <net/sock.h>
19 
20 #define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
21 #define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */
22 
23 static void remove_nexthop(struct net *net, struct nexthop *nh,
24 			   struct nl_info *nlinfo);
25 
26 #define NH_DEV_HASHBITS  8
27 #define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
28 
29 #define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS |		\
30 			       NHA_OP_FLAG_DUMP_HW_STATS)
31 
32 static const struct nla_policy rtm_nh_policy_new[] = {
33 	[NHA_ID]		= { .type = NLA_U32 },
34 	[NHA_GROUP]		= { .type = NLA_BINARY },
35 	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
36 	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
37 	[NHA_OIF]		= { .type = NLA_U32 },
38 	[NHA_GATEWAY]		= { .type = NLA_BINARY },
39 	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
40 	[NHA_ENCAP]		= { .type = NLA_NESTED },
41 	[NHA_FDB]		= { .type = NLA_FLAG },
42 	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
43 	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
44 };
45 
46 static const struct nla_policy rtm_nh_policy_get[] = {
47 	[NHA_ID]		= { .type = NLA_U32 },
48 	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
49 						  NHA_OP_FLAGS_DUMP_ALL),
50 };
51 
52 static const struct nla_policy rtm_nh_policy_del[] = {
53 	[NHA_ID]		= { .type = NLA_U32 },
54 };
55 
56 static const struct nla_policy rtm_nh_policy_dump[] = {
57 	[NHA_OIF]		= { .type = NLA_U32 },
58 	[NHA_GROUPS]		= { .type = NLA_FLAG },
59 	[NHA_MASTER]		= { .type = NLA_U32 },
60 	[NHA_FDB]		= { .type = NLA_FLAG },
61 	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
62 						  NHA_OP_FLAGS_DUMP_ALL),
63 };
64 
65 static const struct nla_policy rtm_nh_res_policy_new[] = {
66 	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
67 	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
68 	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
69 };
70 
71 static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
72 	[NHA_ID]		= { .type = NLA_U32 },
73 	[NHA_OIF]		= { .type = NLA_U32 },
74 	[NHA_MASTER]		= { .type = NLA_U32 },
75 	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
76 };
77 
78 static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
79 	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
80 };
81 
82 static const struct nla_policy rtm_nh_policy_get_bucket[] = {
83 	[NHA_ID]		= { .type = NLA_U32 },
84 	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
85 };
86 
87 static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
88 	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
89 };
90 
91 static bool nexthop_notifiers_is_empty(struct net *net)
92 {
93 	return !net->nexthop.notifier_chain.head;
94 }
95 
96 static void
97 __nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
98 			       const struct nh_info *nhi)
99 {
100 	nh_info->dev = nhi->fib_nhc.nhc_dev;
101 	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
102 	if (nh_info->gw_family == AF_INET)
103 		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
104 	else if (nh_info->gw_family == AF_INET6)
105 		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;
106 
107 	nh_info->id = nhi->nh_parent->id;
108 	nh_info->is_reject = nhi->reject_nh;
109 	nh_info->is_fdb = nhi->fdb_nh;
110 	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
111 }
112 
113 static int nh_notifier_single_info_init(struct nh_notifier_info *info,
114 					const struct nexthop *nh)
115 {
116 	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
117 
118 	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
119 	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
120 	if (!info->nh)
121 		return -ENOMEM;
122 
123 	__nh_notifier_single_info_init(info->nh, nhi);
124 
125 	return 0;
126 }
127 
128 static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
129 {
130 	kfree(info->nh);
131 }
132 
133 static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
134 				       struct nh_group *nhg)
135 {
136 	u16 num_nh = nhg->num_nh;
137 	int i;
138 
139 	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
140 	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
141 			       GFP_KERNEL);
142 	if (!info->nh_grp)
143 		return -ENOMEM;
144 
145 	info->nh_grp->num_nh = num_nh;
146 	info->nh_grp->is_fdb = nhg->fdb_nh;
147 	info->nh_grp->hw_stats = nhg->hw_stats;
148 
149 	for (i = 0; i < num_nh; i++) {
150 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
151 		struct nh_info *nhi;
152 
153 		nhi = rtnl_dereference(nhge->nh->nh_info);
154 		info->nh_grp->nh_entries[i].weight = nhge->weight;
155 		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
156 					       nhi);
157 	}
158 
159 	return 0;
160 }
161 
162 static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
163 					   struct nh_group *nhg)
164 {
165 	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
166 	u16 num_nh_buckets = res_table->num_nh_buckets;
167 	unsigned long size;
168 	u16 i;
169 
170 	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
171 	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
172 	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
173 				       __GFP_NOWARN);
174 	if (!info->nh_res_table)
175 		return -ENOMEM;
176 
177 	info->nh_res_table->num_nh_buckets = num_nh_buckets;
178 	info->nh_res_table->hw_stats = nhg->hw_stats;
179 
180 	for (i = 0; i < num_nh_buckets; i++) {
181 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
182 		struct nh_grp_entry *nhge;
183 		struct nh_info *nhi;
184 
185 		nhge = rtnl_dereference(bucket->nh_entry);
186 		nhi = rtnl_dereference(nhge->nh->nh_info);
187 		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
188 					       nhi);
189 	}
190 
191 	return 0;
192 }
193 
194 static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
195 				     const struct nexthop *nh)
196 {
197 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
198 
199 	if (nhg->hash_threshold)
200 		return nh_notifier_mpath_info_init(info, nhg);
201 	else if (nhg->resilient)
202 		return nh_notifier_res_table_info_init(info, nhg);
203 	return -EINVAL;
204 }
205 
206 static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
207 				      const struct nexthop *nh)
208 {
209 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
210 
211 	if (nhg->hash_threshold)
212 		kfree(info->nh_grp);
213 	else if (nhg->resilient)
214 		vfree(info->nh_res_table);
215 }
216 
217 static int nh_notifier_info_init(struct nh_notifier_info *info,
218 				 const struct nexthop *nh)
219 {
220 	info->id = nh->id;
221 
222 	if (nh->is_group)
223 		return nh_notifier_grp_info_init(info, nh);
224 	else
225 		return nh_notifier_single_info_init(info, nh);
226 }
227 
228 static void nh_notifier_info_fini(struct nh_notifier_info *info,
229 				  const struct nexthop *nh)
230 {
231 	if (nh->is_group)
232 		nh_notifier_grp_info_fini(info, nh);
233 	else
234 		nh_notifier_single_info_fini(info);
235 }
236 
237 static int call_nexthop_notifiers(struct net *net,
238 				  enum nexthop_event_type event_type,
239 				  struct nexthop *nh,
240 				  struct netlink_ext_ack *extack)
241 {
242 	struct nh_notifier_info info = {
243 		.net = net,
244 		.extack = extack,
245 	};
246 	int err;
247 
248 	ASSERT_RTNL();
249 
250 	if (nexthop_notifiers_is_empty(net))
251 		return 0;
252 
253 	err = nh_notifier_info_init(&info, nh);
254 	if (err) {
255 		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
256 		return err;
257 	}
258 
259 	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
260 					   event_type, &info);
261 	nh_notifier_info_fini(&info, nh);
262 
263 	return notifier_to_errno(err);
264 }
265 
266 static int
267 nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
268 				      bool force, unsigned int *p_idle_timer_ms)
269 {
270 	struct nh_res_table *res_table;
271 	struct nh_group *nhg;
272 	struct nexthop *nh;
273 	int err = 0;
274 
275 	/* When 'force' is false, nexthop bucket replacement is performed
276 	 * because the bucket was deemed to be idle. In this case, capable
277 	 * listeners can choose to perform an atomic replacement: The bucket is
278 	 * only replaced if it is inactive. However, if the idle timer interval
279 	 * is smaller than the interval in which a listener is querying
280 	 * buckets' activity from the device, then atomic replacement should
281 	 * not be tried. Pass the idle timer value to listeners, so that they
282 	 * could determine which type of replacement to perform.
283 	 */
284 	if (force) {
285 		*p_idle_timer_ms = 0;
286 		return 0;
287 	}
288 
289 	rcu_read_lock();
290 
291 	nh = nexthop_find_by_id(info->net, info->id);
292 	if (!nh) {
293 		err = -EINVAL;
294 		goto out;
295 	}
296 
297 	nhg = rcu_dereference(nh->nh_grp);
298 	res_table = rcu_dereference(nhg->res_table);
299 	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);
300 
301 out:
302 	rcu_read_unlock();
303 
304 	return err;
305 }
306 
307 static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
308 					    u16 bucket_index, bool force,
309 					    struct nh_info *oldi,
310 					    struct nh_info *newi)
311 {
312 	unsigned int idle_timer_ms;
313 	int err;
314 
315 	err = nh_notifier_res_bucket_idle_timer_get(info, force,
316 						    &idle_timer_ms);
317 	if (err)
318 		return err;
319 
320 	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
321 	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
322 				      GFP_KERNEL);
323 	if (!info->nh_res_bucket)
324 		return -ENOMEM;
325 
326 	info->nh_res_bucket->bucket_index = bucket_index;
327 	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
328 	info->nh_res_bucket->force = force;
329 	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
330 	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
331 	return 0;
332 }
333 
334 static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
335 {
336 	kfree(info->nh_res_bucket);
337 }
338 
339 static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
340 					       u16 bucket_index, bool force,
341 					       struct nh_info *oldi,
342 					       struct nh_info *newi,
343 					       struct netlink_ext_ack *extack)
344 {
345 	struct nh_notifier_info info = {
346 		.net = net,
347 		.extack = extack,
348 		.id = nhg_id,
349 	};
350 	int err;
351 
352 	if (nexthop_notifiers_is_empty(net))
353 		return 0;
354 
355 	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
356 					       oldi, newi);
357 	if (err)
358 		return err;
359 
360 	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
361 					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
362 	nh_notifier_res_bucket_info_fini(&info);
363 
364 	return notifier_to_errno(err);
365 }
366 
367 /* There are three users of RES_TABLE, and NHs etc. referenced from there:
368  *
369  * 1) a collection of callbacks for NH maintenance. This operates under
370  *    RTNL,
371  * 2) the delayed work that gradually balances the resilient table,
372  * 3) and nexthop_select_path(), operating under RCU.
373  *
374  * Both the delayed work and the RTNL block are writers, and need to
375  * maintain mutual exclusion. Since there are only two and well-known
376  * writers for each table, the RTNL code can make sure it has exclusive
377  * access thus:
378  *
379  * - Have the DW operate without locking;
380  * - synchronously cancel the DW;
381  * - do the writing;
382  * - if the write was not actually a delete, call upkeep, which schedules
383  *   DW again if necessary.
384  *
385  * The functions that are always called from the RTNL context use
386  * rtnl_dereference(). The functions that can also be called from the DW do
387  * a raw dereference and rely on the above mutual exclusion scheme.
388  */
389 #define nh_res_dereference(p) (rcu_dereference_raw(p))
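
/* A minimal sketch of the RTNL-side write sequence described above, in
 * the order used by replace_nexthop_grp_res() further down in this file:
 *
 *	nh_res_table_cancel_upkeep(res_table);	(synchronously stop the DW)
 *	... modify the table under RTNL ...	(now the only writer)
 *	nh_res_table_upkeep(res_table, ...);	(reschedules the DW if the
 *						 table is still unbalanced)
 */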
390 
391 static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
392 					     u16 bucket_index, bool force,
393 					     struct nexthop *old_nh,
394 					     struct nexthop *new_nh,
395 					     struct netlink_ext_ack *extack)
396 {
397 	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
398 	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);
399 
400 	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
401 						   force, oldi, newi, extack);
402 }
403 
404 static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
405 					    struct netlink_ext_ack *extack)
406 {
407 	struct nh_notifier_info info = {
408 		.net = net,
409 		.extack = extack,
410 		.id = nh->id,
411 	};
412 	struct nh_group *nhg;
413 	int err;
414 
415 	ASSERT_RTNL();
416 
417 	if (nexthop_notifiers_is_empty(net))
418 		return 0;
419 
420 	/* At this point, the nexthop buckets are still not populated. Only
421 	 * emit a notification with the logical nexthops, so that a listener
422 	 * could potentially veto it in case of unsupported configuration.
423 	 */
424 	nhg = rtnl_dereference(nh->nh_grp);
425 	err = nh_notifier_mpath_info_init(&info, nhg);
426 	if (err) {
427 		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
428 		return err;
429 	}
430 
431 	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
432 					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
433 					   &info);
434 	kfree(info.nh_grp);
435 
436 	return notifier_to_errno(err);
437 }
438 
439 static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
440 				 enum nexthop_event_type event_type,
441 				 struct nexthop *nh,
442 				 struct netlink_ext_ack *extack)
443 {
444 	struct nh_notifier_info info = {
445 		.net = net,
446 		.extack = extack,
447 	};
448 	int err;
449 
450 	err = nh_notifier_info_init(&info, nh);
451 	if (err)
452 		return err;
453 
454 	err = nb->notifier_call(nb, event_type, &info);
455 	nh_notifier_info_fini(&info, nh);
456 
457 	return notifier_to_errno(err);
458 }
459 
460 static unsigned int nh_dev_hashfn(unsigned int val)
461 {
462 	unsigned int mask = NH_DEV_HASHSIZE - 1;
463 
464 	return (val ^
465 		(val >> NH_DEV_HASHBITS) ^
466 		(val >> (NH_DEV_HASHBITS * 2))) & mask;
467 }
468 
469 static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
470 {
471 	struct net_device *dev = nhi->fib_nhc.nhc_dev;
472 	struct hlist_head *head;
473 	unsigned int hash;
474 
475 	WARN_ON(!dev);
476 
477 	hash = nh_dev_hashfn(dev->ifindex);
478 	head = &net->nexthop.devhash[hash];
479 	hlist_add_head(&nhi->dev_hash, head);
480 }
481 
482 static void nexthop_free_group(struct nexthop *nh)
483 {
484 	struct nh_group *nhg;
485 	int i;
486 
487 	nhg = rcu_dereference_raw(nh->nh_grp);
488 	for (i = 0; i < nhg->num_nh; ++i) {
489 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
490 
491 		WARN_ON(!list_empty(&nhge->nh_list));
492 		free_percpu(nhge->stats);
493 		nexthop_put(nhge->nh);
494 	}
495 
496 	WARN_ON(nhg->spare == nhg);
497 
498 	if (nhg->resilient)
499 		vfree(rcu_dereference_raw(nhg->res_table));
500 
501 	kfree(nhg->spare);
502 	kfree(nhg);
503 }
504 
505 static void nexthop_free_single(struct nexthop *nh)
506 {
507 	struct nh_info *nhi;
508 
509 	nhi = rcu_dereference_raw(nh->nh_info);
510 	switch (nhi->family) {
511 	case AF_INET:
512 		fib_nh_release(nh->net, &nhi->fib_nh);
513 		break;
514 	case AF_INET6:
515 		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
516 		break;
517 	}
518 	kfree(nhi);
519 }
520 
521 void nexthop_free_rcu(struct rcu_head *head)
522 {
523 	struct nexthop *nh = container_of(head, struct nexthop, rcu);
524 
525 	if (nh->is_group)
526 		nexthop_free_group(nh);
527 	else
528 		nexthop_free_single(nh);
529 
530 	kfree(nh);
531 }
532 EXPORT_SYMBOL_GPL(nexthop_free_rcu);
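
/* nexthop_free_rcu() is the call_rcu() callback invoked once the last
 * reference is dropped (see nexthop_put() in include/net/nexthop.h), so
 * RCU readers walking a group never see freed entries.
 */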
533 
534 static struct nexthop *nexthop_alloc(void)
535 {
536 	struct nexthop *nh;
537 
538 	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
539 	if (nh) {
540 		INIT_LIST_HEAD(&nh->fi_list);
541 		INIT_LIST_HEAD(&nh->f6i_list);
542 		INIT_LIST_HEAD(&nh->grp_list);
543 		INIT_LIST_HEAD(&nh->fdb_list);
544 	}
545 	return nh;
546 }
547 
548 static struct nh_group *nexthop_grp_alloc(u16 num_nh)
549 {
550 	struct nh_group *nhg;
551 
552 	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
553 	if (nhg)
554 		nhg->num_nh = num_nh;
555 
556 	return nhg;
557 }
558 
559 static void nh_res_table_upkeep_dw(struct work_struct *work);
560 
561 static struct nh_res_table *
562 nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
563 {
564 	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
565 	struct nh_res_table *res_table;
566 	unsigned long size;
567 
568 	size = struct_size(res_table, nh_buckets, num_nh_buckets);
569 	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
570 	if (!res_table)
571 		return NULL;
572 
573 	res_table->net = net;
574 	res_table->nhg_id = nhg_id;
575 	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
576 	INIT_LIST_HEAD(&res_table->uw_nh_entries);
577 	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
578 	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
579 	res_table->num_nh_buckets = num_nh_buckets;
580 	return res_table;
581 }
582 
583 static void nh_base_seq_inc(struct net *net)
584 {
585 	while (++net->nexthop.seq == 0)
586 		;
587 }
588 
589 /* no reference taken; rcu lock or rtnl must be held */
590 struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
591 {
592 	struct rb_node **pp, *parent = NULL, *next;
593 
594 	pp = &net->nexthop.rb_root.rb_node;
595 	while (1) {
596 		struct nexthop *nh;
597 
598 		next = rcu_dereference_raw(*pp);
599 		if (!next)
600 			break;
601 		parent = next;
602 
603 		nh = rb_entry(parent, struct nexthop, rb_node);
604 		if (id < nh->id)
605 			pp = &next->rb_left;
606 		else if (id > nh->id)
607 			pp = &next->rb_right;
608 		else
609 			return nh;
610 	}
611 	return NULL;
612 }
613 EXPORT_SYMBOL_GPL(nexthop_find_by_id);
614 
615 /* used for auto id allocation; called with rtnl held */
616 static u32 nh_find_unused_id(struct net *net)
617 {
618 	u32 id_start = net->nexthop.last_id_allocated;
619 
620 	while (1) {
621 		net->nexthop.last_id_allocated++;
622 		if (net->nexthop.last_id_allocated == id_start)
623 			break;
624 
625 		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
626 			return net->nexthop.last_id_allocated;
627 	}
628 	return 0;
629 }
630 
631 static void nh_res_time_set_deadline(unsigned long next_time,
632 				     unsigned long *deadline)
633 {
634 	if (time_before(next_time, *deadline))
635 		*deadline = next_time;
636 }
637 
638 static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
639 {
640 	if (list_empty(&res_table->uw_nh_entries))
641 		return 0;
642 	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
643 }
644 
645 static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
646 {
647 	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
648 	struct nlattr *nest;
649 
650 	nest = nla_nest_start(skb, NHA_RES_GROUP);
651 	if (!nest)
652 		return -EMSGSIZE;
653 
654 	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
655 			res_table->num_nh_buckets) ||
656 	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
657 			jiffies_to_clock_t(res_table->idle_timer)) ||
658 	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
659 			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
660 	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
661 			      nh_res_table_unbalanced_time(res_table),
662 			      NHA_RES_GROUP_PAD))
663 		goto nla_put_failure;
664 
665 	nla_nest_end(skb, nest);
666 	return 0;
667 
668 nla_put_failure:
669 	nla_nest_cancel(skb, nest);
670 	return -EMSGSIZE;
671 }
672 
673 static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
674 {
675 	struct nh_grp_entry_stats *cpu_stats;
676 
677 	cpu_stats = get_cpu_ptr(nhge->stats);
678 	u64_stats_update_begin(&cpu_stats->syncp);
679 	u64_stats_inc(&cpu_stats->packets);
680 	u64_stats_update_end(&cpu_stats->syncp);
681 	put_cpu_ptr(cpu_stats);
682 }
683 
684 static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
685 				    u64 *ret_packets)
686 {
687 	int i;
688 
689 	*ret_packets = 0;
690 
691 	for_each_possible_cpu(i) {
692 		struct nh_grp_entry_stats *cpu_stats;
693 		unsigned int start;
694 		u64 packets;
695 
696 		cpu_stats = per_cpu_ptr(nhge->stats, i);
697 		do {
698 			start = u64_stats_fetch_begin(&cpu_stats->syncp);
699 			packets = u64_stats_read(&cpu_stats->packets);
700 		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
701 
702 		*ret_packets += packets;
703 	}
704 }
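
/* The u64_stats fetch_begin/fetch_retry pair above makes reading the
 * 64-bit packet counter tear-free on 32-bit SMP kernels; on 64-bit
 * builds it compiles down to plain loads.
 */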
705 
706 static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
707 					 const struct nexthop *nh)
708 {
709 	struct nh_group *nhg;
710 	int i;
711 
712 	ASSERT_RTNL();
713 	nhg = rtnl_dereference(nh->nh_grp);
714 
715 	info->id = nh->id;
716 	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
717 	info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
718 						    stats, nhg->num_nh),
719 					GFP_KERNEL);
720 	if (!info->nh_grp_hw_stats)
721 		return -ENOMEM;
722 
723 	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
724 	for (i = 0; i < nhg->num_nh; i++) {
725 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
726 
727 		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
728 	}
729 
730 	return 0;
731 }
732 
733 static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
734 {
735 	kfree(info->nh_grp_hw_stats);
736 }
737 
738 void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
739 				  unsigned int nh_idx,
740 				  u64 delta_packets)
741 {
742 	info->hw_stats_used = true;
743 	info->stats[nh_idx].packets += delta_packets;
744 }
745 EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);
746 
747 static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
748 					 struct nh_notifier_info *info)
749 {
750 	struct nh_group *nhg;
751 	int i;
752 
753 	ASSERT_RTNL();
754 	nhg = rtnl_dereference(nh->nh_grp);
755 
756 	for (i = 0; i < nhg->num_nh; i++) {
757 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
758 
759 		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
760 	}
761 }
762 
763 static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
764 {
765 	struct nh_notifier_info info = {
766 		.net = nh->net,
767 	};
768 	struct net *net = nh->net;
769 	int err;
770 
771 	if (nexthop_notifiers_is_empty(net)) {
772 		*hw_stats_used = false;
773 		return 0;
774 	}
775 
776 	err = nh_notifier_grp_hw_stats_init(&info, nh);
777 	if (err)
778 		return err;
779 
780 	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
781 					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
782 					   &info);
783 
784 	/* Cache whatever we got, even if there was an error, otherwise the
785 	 * successful stats retrievals would get lost.
786 	 */
787 	nh_grp_hw_stats_apply_update(nh, &info);
788 	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;
789 
790 	nh_notifier_grp_hw_stats_fini(&info);
791 	return notifier_to_errno(err);
792 }
793 
794 static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
795 					struct nh_grp_entry *nhge,
796 					u32 op_flags)
797 {
798 	struct nlattr *nest;
799 	u64 packets;
800 
801 	nh_grp_entry_stats_read(nhge, &packets);
802 
803 	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
804 	if (!nest)
805 		return -EMSGSIZE;
806 
807 	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
808 	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
809 			 packets + nhge->packets_hw))
810 		goto nla_put_failure;
811 
812 	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
813 	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
814 			 nhge->packets_hw))
815 		goto nla_put_failure;
816 
817 	nla_nest_end(skb, nest);
818 	return 0;
819 
820 nla_put_failure:
821 	nla_nest_cancel(skb, nest);
822 	return -EMSGSIZE;
823 }
824 
825 static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
826 				  u32 op_flags)
827 {
828 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
829 	struct nlattr *nest;
830 	bool hw_stats_used;
831 	int err;
832 	int i;
833 
834 	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
835 		goto err_out;
836 
837 	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
838 	    nhg->hw_stats) {
839 		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
840 		if (err)
841 			goto out;
842 
843 		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
844 			goto err_out;
845 	}
846 
847 	nest = nla_nest_start(skb, NHA_GROUP_STATS);
848 	if (!nest)
849 		goto err_out;
850 
851 	for (i = 0; i < nhg->num_nh; i++)
852 		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
853 						 op_flags))
854 			goto cancel_out;
855 
856 	nla_nest_end(skb, nest);
857 	return 0;
858 
859 cancel_out:
860 	nla_nest_cancel(skb, nest);
861 err_out:
862 	err = -EMSGSIZE;
863 out:
864 	return err;
865 }
866 
867 static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
868 			    u32 op_flags)
869 {
870 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
871 	struct nexthop_grp *p;
872 	size_t len = nhg->num_nh * sizeof(*p);
873 	struct nlattr *nla;
874 	u16 group_type = 0;
875 	int i;
876 
877 	if (nhg->hash_threshold)
878 		group_type = NEXTHOP_GRP_TYPE_MPATH;
879 	else if (nhg->resilient)
880 		group_type = NEXTHOP_GRP_TYPE_RES;
881 
882 	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
883 		goto nla_put_failure;
884 
885 	nla = nla_reserve(skb, NHA_GROUP, len);
886 	if (!nla)
887 		goto nla_put_failure;
888 
889 	p = nla_data(nla);
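	/* nexthop_grp.weight carries (weight - 1) on the wire: the u8 range
	 * 0..254 encodes internal weights 1..255 (the parse side adds the 1
	 * back when the group is created).
	 */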
890 	for (i = 0; i < nhg->num_nh; ++i) {
891 		p->id = nhg->nh_entries[i].nh->id;
892 		p->weight = nhg->nh_entries[i].weight - 1;
893 		p += 1;
894 	}
895 
896 	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
897 		goto nla_put_failure;
898 
899 	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
900 	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
901 	     nla_put_nh_group_stats(skb, nh, op_flags)))
902 		goto nla_put_failure;
903 
904 	return 0;
905 
906 nla_put_failure:
907 	return -EMSGSIZE;
908 }
909 
910 static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
911 			int event, u32 portid, u32 seq, unsigned int nlflags,
912 			u32 op_flags)
913 {
914 	struct fib6_nh *fib6_nh;
915 	struct fib_nh *fib_nh;
916 	struct nlmsghdr *nlh;
917 	struct nh_info *nhi;
918 	struct nhmsg *nhm;
919 
920 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
921 	if (!nlh)
922 		return -EMSGSIZE;
923 
924 	nhm = nlmsg_data(nlh);
925 	nhm->nh_family = AF_UNSPEC;
926 	nhm->nh_flags = nh->nh_flags;
927 	nhm->nh_protocol = nh->protocol;
928 	nhm->nh_scope = 0;
929 	nhm->resvd = 0;
930 
931 	if (nla_put_u32(skb, NHA_ID, nh->id))
932 		goto nla_put_failure;
933 
934 	if (nh->is_group) {
935 		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
936 
937 		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
938 			goto nla_put_failure;
939 		if (nla_put_nh_group(skb, nh, op_flags))
940 			goto nla_put_failure;
941 		goto out;
942 	}
943 
944 	nhi = rtnl_dereference(nh->nh_info);
945 	nhm->nh_family = nhi->family;
946 	if (nhi->reject_nh) {
947 		if (nla_put_flag(skb, NHA_BLACKHOLE))
948 			goto nla_put_failure;
949 		goto out;
950 	} else if (nhi->fdb_nh) {
951 		if (nla_put_flag(skb, NHA_FDB))
952 			goto nla_put_failure;
953 	} else {
954 		const struct net_device *dev;
955 
956 		dev = nhi->fib_nhc.nhc_dev;
957 		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
958 			goto nla_put_failure;
959 	}
960 
961 	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
962 	switch (nhi->family) {
963 	case AF_INET:
964 		fib_nh = &nhi->fib_nh;
965 		if (fib_nh->fib_nh_gw_family &&
966 		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
967 			goto nla_put_failure;
968 		break;
969 
970 	case AF_INET6:
971 		fib6_nh = &nhi->fib6_nh;
972 		if (fib6_nh->fib_nh_gw_family &&
973 		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
974 			goto nla_put_failure;
975 		break;
976 	}
977 
978 	if (nhi->fib_nhc.nhc_lwtstate &&
979 	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
980 				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
981 		goto nla_put_failure;
982 
983 out:
984 	nlmsg_end(skb, nlh);
985 	return 0;
986 
987 nla_put_failure:
988 	nlmsg_cancel(skb, nlh);
989 	return -EMSGSIZE;
990 }
991 
992 static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
993 {
994 	return nla_total_size(0) +	/* NHA_RES_GROUP */
995 		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
996 		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
997 		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
998 		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
999 }
1000 
1001 static size_t nh_nlmsg_size_grp(struct nexthop *nh)
1002 {
1003 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
1004 	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
1005 	size_t tot = nla_total_size(sz) +
1006 		nla_total_size(2); /* NHA_GROUP_TYPE */
1007 
1008 	if (nhg->resilient)
1009 		tot += nh_nlmsg_size_grp_res(nhg);
1010 
1011 	return tot;
1012 }
1013 
1014 static size_t nh_nlmsg_size_single(struct nexthop *nh)
1015 {
1016 	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
1017 	size_t sz;
1018 
1019 	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
1020 	 * are mutually exclusive
1021 	 */
1022 	sz = nla_total_size(4);  /* NHA_OIF */
1023 
1024 	switch (nhi->family) {
1025 	case AF_INET:
1026 		if (nhi->fib_nh.fib_nh_gw_family)
1027 			sz += nla_total_size(4);  /* NHA_GATEWAY */
1028 		break;
1029 
1030 	case AF_INET6:
1031 		/* NHA_GATEWAY */
1032 		if (nhi->fib6_nh.fib_nh_gw_family)
1033 			sz += nla_total_size(sizeof(const struct in6_addr));
1034 		break;
1035 	}
1036 
1037 	if (nhi->fib_nhc.nhc_lwtstate) {
1038 		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
1039 		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
1040 	}
1041 
1042 	return sz;
1043 }
1044 
1045 static size_t nh_nlmsg_size(struct nexthop *nh)
1046 {
1047 	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));
1048 
1049 	sz += nla_total_size(4); /* NHA_ID */
1050 
1051 	if (nh->is_group)
1052 		sz += nh_nlmsg_size_grp(nh);
1053 	else
1054 		sz += nh_nlmsg_size_single(nh);
1055 
1056 	return sz;
1057 }
1058 
1059 static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
1060 {
1061 	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
1062 	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
1063 	struct sk_buff *skb;
1064 	int err = -ENOBUFS;
1065 
1066 	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
1067 	if (!skb)
1068 		goto errout;
1069 
1070 	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
1071 	if (err < 0) {
1072 		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
1073 		WARN_ON(err == -EMSGSIZE);
1074 		kfree_skb(skb);
1075 		goto errout;
1076 	}
1077 
1078 	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
1079 		    info->nlh, gfp_any());
1080 	return;
1081 errout:
1082 	if (err < 0)
1083 		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
1084 }
1085 
1086 static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
1087 {
1088 	return (unsigned long)atomic_long_read(&bucket->used_time);
1089 }
1090 
1091 static unsigned long
1092 nh_res_bucket_idle_point(const struct nh_res_table *res_table,
1093 			 const struct nh_res_bucket *bucket,
1094 			 unsigned long now)
1095 {
1096 	unsigned long time = nh_res_bucket_used_time(bucket);
1097 
1098 	/* Bucket was not used since it was migrated. The idle time is now. */
1099 	if (time == bucket->migrated_time)
1100 		return now;
1101 
1102 	return time + res_table->idle_timer;
1103 }
1104 
1105 static unsigned long
1106 nh_res_table_unb_point(const struct nh_res_table *res_table)
1107 {
1108 	return res_table->unbalanced_since + res_table->unbalanced_timer;
1109 }
1110 
1111 static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
1112 				   struct nh_res_bucket *bucket)
1113 {
1114 	unsigned long now = jiffies;
1115 
1116 	atomic_long_set(&bucket->used_time, (long)now);
1117 	bucket->migrated_time = now;
1118 }
1119 
1120 static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
1121 {
1122 	atomic_long_set(&bucket->used_time, (long)jiffies);
1123 }
1124 
1125 static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
1126 {
1127 	unsigned long used_time = nh_res_bucket_used_time(bucket);
1128 
1129 	return jiffies_delta_to_clock_t(jiffies - used_time);
1130 }
1131 
1132 static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
1133 			      struct nh_res_bucket *bucket, u16 bucket_index,
1134 			      int event, u32 portid, u32 seq,
1135 			      unsigned int nlflags,
1136 			      struct netlink_ext_ack *extack)
1137 {
1138 	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
1139 	struct nlmsghdr *nlh;
1140 	struct nlattr *nest;
1141 	struct nhmsg *nhm;
1142 
1143 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
1144 	if (!nlh)
1145 		return -EMSGSIZE;
1146 
1147 	nhm = nlmsg_data(nlh);
1148 	nhm->nh_family = AF_UNSPEC;
1149 	nhm->nh_flags = bucket->nh_flags;
1150 	nhm->nh_protocol = nh->protocol;
1151 	nhm->nh_scope = 0;
1152 	nhm->resvd = 0;
1153 
1154 	if (nla_put_u32(skb, NHA_ID, nh->id))
1155 		goto nla_put_failure;
1156 
1157 	nest = nla_nest_start(skb, NHA_RES_BUCKET);
1158 	if (!nest)
1159 		goto nla_put_failure;
1160 
1161 	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
1162 	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
1163 	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
1164 			      nh_res_bucket_idle_time(bucket),
1165 			      NHA_RES_BUCKET_PAD))
1166 		goto nla_put_failure_nest;
1167 
1168 	nla_nest_end(skb, nest);
1169 	nlmsg_end(skb, nlh);
1170 	return 0;
1171 
1172 nla_put_failure_nest:
1173 	nla_nest_cancel(skb, nest);
1174 nla_put_failure:
1175 	nlmsg_cancel(skb, nlh);
1176 	return -EMSGSIZE;
1177 }
1178 
1179 static void nexthop_bucket_notify(struct nh_res_table *res_table,
1180 				  u16 bucket_index)
1181 {
1182 	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
1183 	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
1184 	struct nexthop *nh = nhge->nh_parent;
1185 	struct sk_buff *skb;
1186 	int err = -ENOBUFS;
1187 
1188 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1189 	if (!skb)
1190 		goto errout;
1191 
1192 	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
1193 				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
1194 				 NULL);
1195 	if (err < 0) {
1196 		kfree_skb(skb);
1197 		goto errout;
1198 	}
1199 
1200 	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
1201 	return;
1202 errout:
1203 	if (err < 0)
1204 		rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
1205 }
1206 
1207 static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
1208 			   bool *is_fdb, struct netlink_ext_ack *extack)
1209 {
1210 	if (nh->is_group) {
1211 		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
1212 
1213 		/* Nesting groups within groups is not supported. */
1214 		if (nhg->hash_threshold) {
1215 			NL_SET_ERR_MSG(extack,
1216 				       "Hash-threshold group can not be a nexthop within a group");
1217 			return false;
1218 		}
1219 		if (nhg->resilient) {
1220 			NL_SET_ERR_MSG(extack,
1221 				       "Resilient group can not be a nexthop within a group");
1222 			return false;
1223 		}
1224 		*is_fdb = nhg->fdb_nh;
1225 	} else {
1226 		struct nh_info *nhi = rtnl_dereference(nh->nh_info);
1227 
1228 		if (nhi->reject_nh && npaths > 1) {
1229 			NL_SET_ERR_MSG(extack,
1230 				       "Blackhole nexthop can not be used in a group with more than 1 path");
1231 			return false;
1232 		}
1233 		*is_fdb = nhi->fdb_nh;
1234 	}
1235 
1236 	return true;
1237 }
1238 
1239 static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
1240 				   struct netlink_ext_ack *extack)
1241 {
1242 	struct nh_info *nhi;
1243 
1244 	nhi = rtnl_dereference(nh->nh_info);
1245 
1246 	if (!nhi->fdb_nh) {
1247 		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
1248 		return -EINVAL;
1249 	}
1250 
1251 	if (*nh_family == AF_UNSPEC) {
1252 		*nh_family = nhi->family;
1253 	} else if (*nh_family != nhi->family) {
1254 		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
1255 		return -EINVAL;
1256 	}
1257 
1258 	return 0;
1259 }
1260 
1261 static int nh_check_attr_group(struct net *net,
1262 			       struct nlattr *tb[], size_t tb_size,
1263 			       u16 nh_grp_type, struct netlink_ext_ack *extack)
1264 {
1265 	unsigned int len = nla_len(tb[NHA_GROUP]);
1266 	u8 nh_family = AF_UNSPEC;
1267 	struct nexthop_grp *nhg;
1268 	unsigned int i, j;
1269 	u8 nhg_fdb = 0;
1270 
1271 	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
1272 		NL_SET_ERR_MSG(extack,
1273 			       "Invalid length for nexthop group attribute");
1274 		return -EINVAL;
1275 	}
1276 
1277 	/* convert len to number of nexthop ids */
1278 	len /= sizeof(*nhg);
1279 
1280 	nhg = nla_data(tb[NHA_GROUP]);
1281 	for (i = 0; i < len; ++i) {
1282 		if (nhg[i].resvd1 || nhg[i].resvd2) {
1283 			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
1284 			return -EINVAL;
1285 		}
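		/* The wire weight is a u8 holding (weight - 1), so 254 is the
		 * largest encodable value, i.e. an effective weight of 255.
		 */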
1286 		if (nhg[i].weight > 254) {
1287 			NL_SET_ERR_MSG(extack, "Invalid value for weight");
1288 			return -EINVAL;
1289 		}
1290 		for (j = i + 1; j < len; ++j) {
1291 			if (nhg[i].id == nhg[j].id) {
1292 				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
1293 				return -EINVAL;
1294 			}
1295 		}
1296 	}
1297 
1298 	if (tb[NHA_FDB])
1299 		nhg_fdb = 1;
1300 	nhg = nla_data(tb[NHA_GROUP]);
1301 	for (i = 0; i < len; ++i) {
1302 		struct nexthop *nh;
1303 		bool is_fdb_nh;
1304 
1305 		nh = nexthop_find_by_id(net, nhg[i].id);
1306 		if (!nh) {
1307 			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
1308 			return -EINVAL;
1309 		}
1310 		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
1311 			return -EINVAL;
1312 
1313 		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
1314 			return -EINVAL;
1315 
1316 		if (!nhg_fdb && is_fdb_nh) {
1317 			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
1318 			return -EINVAL;
1319 		}
1320 	}
1321 	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
1322 		if (!tb[i])
1323 			continue;
1324 		switch (i) {
1325 		case NHA_HW_STATS_ENABLE:
1326 		case NHA_FDB:
1327 			continue;
1328 		case NHA_RES_GROUP:
1329 			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
1330 				continue;
1331 			break;
1332 		}
1333 		NL_SET_ERR_MSG(extack,
1334 			       "No other attributes can be set in nexthop groups");
1335 		return -EINVAL;
1336 	}
1337 
1338 	return 0;
1339 }
1340 
1341 static bool ipv6_good_nh(const struct fib6_nh *nh)
1342 {
1343 	int state = NUD_REACHABLE;
1344 	struct neighbour *n;
1345 
1346 	rcu_read_lock();
1347 
1348 	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
1349 	if (n)
1350 		state = READ_ONCE(n->nud_state);
1351 
1352 	rcu_read_unlock();
1353 
1354 	return !!(state & NUD_VALID);
1355 }
1356 
1357 static bool ipv4_good_nh(const struct fib_nh *nh)
1358 {
1359 	int state = NUD_REACHABLE;
1360 	struct neighbour *n;
1361 
1362 	rcu_read_lock();
1363 
1364 	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
1365 				      (__force u32)nh->fib_nh_gw4);
1366 	if (n)
1367 		state = READ_ONCE(n->nud_state);
1368 
1369 	rcu_read_unlock();
1370 
1371 	return !!(state & NUD_VALID);
1372 }
1373 
1374 static bool nexthop_is_good_nh(const struct nexthop *nh)
1375 {
1376 	struct nh_info *nhi = rcu_dereference(nh->nh_info);
1377 
1378 	switch (nhi->family) {
1379 	case AF_INET:
1380 		return ipv4_good_nh(&nhi->fib_nh);
1381 	case AF_INET6:
1382 		return ipv6_good_nh(&nhi->fib6_nh);
1383 	}
1384 
1385 	return false;
1386 }
1387 
1388 static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
1389 {
1390 	int i;
1391 
1392 	for (i = 0; i < nhg->num_nh; i++) {
1393 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1394 
1395 		if (hash > atomic_read(&nhge->hthr.upper_bound))
1396 			continue;
1397 
1398 		nh_grp_entry_stats_inc(nhge);
1399 		return nhge->nh;
1400 	}
1401 
1402 	WARN_ON_ONCE(1);
1403 	return NULL;
1404 }
1405 
1406 static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
1407 {
1408 	struct nh_grp_entry *nhge0 = NULL;
1409 	int i;
1410 
1411 	if (nhg->fdb_nh)
1412 		return nexthop_select_path_fdb(nhg, hash);
1413 
1414 	for (i = 0; i < nhg->num_nh; ++i) {
1415 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1416 
1417 		/* nexthops always check whether the neighbour is good and do
1418 		 * not rely on a sysctl for this behavior
1419 		 */
1420 		if (!nexthop_is_good_nh(nhge->nh))
1421 			continue;
1422 
1423 		if (!nhge0)
1424 			nhge0 = nhge;
1425 
1426 		if (hash > atomic_read(&nhge->hthr.upper_bound))
1427 			continue;
1428 
1429 		nh_grp_entry_stats_inc(nhge);
1430 		return nhge->nh;
1431 	}
1432 
1433 	if (!nhge0)
1434 		nhge0 = &nhg->nh_entries[0];
1435 	nh_grp_entry_stats_inc(nhge0);
1436 	return nhge0->nh;
1437 }
1438 
1439 static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
1440 {
1441 	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
1442 	u16 bucket_index = hash % res_table->num_nh_buckets;
1443 	struct nh_res_bucket *bucket;
1444 	struct nh_grp_entry *nhge;
1445 
1446 	/* nexthop_select_path() is expected to return a non-NULL value, so
1447 	 * skip protocol validation and just hand out whatever there is.
1448 	 */
1449 	bucket = &res_table->nh_buckets[bucket_index];
1450 	nh_res_bucket_set_busy(bucket);
1451 	nhge = rcu_dereference(bucket->nh_entry);
1452 	nh_grp_entry_stats_inc(nhge);
1453 	return nhge->nh;
1454 }
1455 
1456 struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
1457 {
1458 	struct nh_group *nhg;
1459 
1460 	if (!nh->is_group)
1461 		return nh;
1462 
1463 	nhg = rcu_dereference(nh->nh_grp);
1464 	if (nhg->hash_threshold)
1465 		return nexthop_select_path_hthr(nhg, hash);
1466 	else if (nhg->resilient)
1467 		return nexthop_select_path_res(nhg, hash);
1468 
1469 	/* Unreachable. */
1470 	return NULL;
1471 }
1472 EXPORT_SYMBOL_GPL(nexthop_select_path);
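
/* The hash argument is expected to be a 31-bit non-negative value (e.g.
 * fib_multipath_hash() shifts its 32-bit result right by one). Hash-
 * threshold groups compare it against per-entry upper bounds; resilient
 * groups reduce it modulo the bucket count, e.g. with 8 buckets a hash
 * of 21 selects bucket 5.
 */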
1473 
1474 int nexthop_for_each_fib6_nh(struct nexthop *nh,
1475 			     int (*cb)(struct fib6_nh *nh, void *arg),
1476 			     void *arg)
1477 {
1478 	struct nh_info *nhi;
1479 	int err;
1480 
1481 	if (nh->is_group) {
1482 		struct nh_group *nhg;
1483 		int i;
1484 
1485 		nhg = rcu_dereference_rtnl(nh->nh_grp);
1486 		for (i = 0; i < nhg->num_nh; i++) {
1487 			struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1488 
1489 			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
1490 			err = cb(&nhi->fib6_nh, arg);
1491 			if (err)
1492 				return err;
1493 		}
1494 	} else {
1495 		nhi = rcu_dereference_rtnl(nh->nh_info);
1496 		err = cb(&nhi->fib6_nh, arg);
1497 		if (err)
1498 			return err;
1499 	}
1500 
1501 	return 0;
1502 }
1503 EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);
1504 
1505 static int check_src_addr(const struct in6_addr *saddr,
1506 			  struct netlink_ext_ack *extack)
1507 {
1508 	if (!ipv6_addr_any(saddr)) {
1509 		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
1510 		return -EINVAL;
1511 	}
1512 	return 0;
1513 }
1514 
1515 int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
1516 		       struct netlink_ext_ack *extack)
1517 {
1518 	struct nh_info *nhi;
1519 	bool is_fdb_nh;
1520 
1521 	/* fib6_src is unique to a fib6_info and limits the ability to cache
1522 	 * routes in fib6_nh within a nexthop that is potentially shared
1523 	 * across multiple fib entries. If the config wants to use source
1524 	 * routing it can not use nexthop objects. mlxsw also does not allow
1525 	 * fib6_src on routes.
1526 	 */
1527 	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
1528 		return -EINVAL;
1529 
1530 	if (nh->is_group) {
1531 		struct nh_group *nhg;
1532 
1533 		nhg = rtnl_dereference(nh->nh_grp);
1534 		if (nhg->has_v4)
1535 			goto no_v4_nh;
1536 		is_fdb_nh = nhg->fdb_nh;
1537 	} else {
1538 		nhi = rtnl_dereference(nh->nh_info);
1539 		if (nhi->family == AF_INET)
1540 			goto no_v4_nh;
1541 		is_fdb_nh = nhi->fdb_nh;
1542 	}
1543 
1544 	if (is_fdb_nh) {
1545 		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1546 		return -EINVAL;
1547 	}
1548 
1549 	return 0;
1550 no_v4_nh:
1551 	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
1552 	return -EINVAL;
1553 }
1554 EXPORT_SYMBOL_GPL(fib6_check_nexthop);
1555 
1556 /* If the existing nexthop has IPv6 routes linked to it, we need
1557  * to verify that the new spec works with IPv6.
1558  */
1559 static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
1560 			      struct netlink_ext_ack *extack)
1561 {
1562 	struct fib6_info *f6i;
1563 
1564 	if (list_empty(&old->f6i_list))
1565 		return 0;
1566 
1567 	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
1568 		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
1569 			return -EINVAL;
1570 	}
1571 
1572 	return fib6_check_nexthop(new, NULL, extack);
1573 }
1574 
1575 static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
1576 			       struct netlink_ext_ack *extack)
1577 {
1578 	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
1579 		NL_SET_ERR_MSG(extack,
1580 			       "Route with host scope can not have a gateway");
1581 		return -EINVAL;
1582 	}
1583 
1584 	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
1585 		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
1586 		return -EINVAL;
1587 	}
1588 
1589 	return 0;
1590 }
1591 
1592 /* Invoked by fib add code to verify nexthop by id is ok with
1593  * config for prefix; parts of fib_check_nh not done when nexthop
1594  * object is used.
1595  */
1596 int fib_check_nexthop(struct nexthop *nh, u8 scope,
1597 		      struct netlink_ext_ack *extack)
1598 {
1599 	struct nh_info *nhi;
1600 	int err = 0;
1601 
1602 	if (nh->is_group) {
1603 		struct nh_group *nhg;
1604 
1605 		nhg = rtnl_dereference(nh->nh_grp);
1606 		if (nhg->fdb_nh) {
1607 			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1608 			err = -EINVAL;
1609 			goto out;
1610 		}
1611 
1612 		if (scope == RT_SCOPE_HOST) {
1613 			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
1614 			err = -EINVAL;
1615 			goto out;
1616 		}
1617 
1618 		/* all nexthops in a group have the same scope */
1619 		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
1620 		err = nexthop_check_scope(nhi, scope, extack);
1621 	} else {
1622 		nhi = rtnl_dereference(nh->nh_info);
1623 		if (nhi->fdb_nh) {
1624 			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1625 			err = -EINVAL;
1626 			goto out;
1627 		}
1628 		err = nexthop_check_scope(nhi, scope, extack);
1629 	}
1630 
1631 out:
1632 	return err;
1633 }
1634 
1635 static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
1636 			     struct netlink_ext_ack *extack)
1637 {
1638 	struct fib_info *fi;
1639 
1640 	list_for_each_entry(fi, &old->fi_list, nh_list) {
1641 		int err;
1642 
1643 		err = fib_check_nexthop(new, fi->fib_scope, extack);
1644 		if (err)
1645 			return err;
1646 	}
1647 	return 0;
1648 }
1649 
1650 static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
1651 {
1652 	return nhge->res.count_buckets == nhge->res.wants_buckets;
1653 }
1654 
1655 static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
1656 {
1657 	return nhge->res.count_buckets > nhge->res.wants_buckets;
1658 }
1659 
1660 static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
1661 {
1662 	return nhge->res.count_buckets < nhge->res.wants_buckets;
1663 }
1664 
1665 static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
1666 {
1667 	return list_empty(&res_table->uw_nh_entries);
1668 }
1669 
1670 static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
1671 {
1672 	struct nh_grp_entry *nhge;
1673 
1674 	if (bucket->occupied) {
1675 		nhge = nh_res_dereference(bucket->nh_entry);
1676 		nhge->res.count_buckets--;
1677 		bucket->occupied = false;
1678 	}
1679 }
1680 
1681 static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
1682 				 struct nh_grp_entry *nhge)
1683 {
1684 	nh_res_bucket_unset_nh(bucket);
1685 
1686 	bucket->occupied = true;
1687 	rcu_assign_pointer(bucket->nh_entry, nhge);
1688 	nhge->res.count_buckets++;
1689 }
1690 
1691 static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
1692 					 struct nh_res_bucket *bucket,
1693 					 unsigned long *deadline, bool *force)
1694 {
1695 	unsigned long now = jiffies;
1696 	struct nh_grp_entry *nhge;
1697 	unsigned long idle_point;
1698 
1699 	if (!bucket->occupied) {
1700 		/* The bucket is not occupied, its NHGE pointer is either
1701 		 * NULL or obsolete. We _have to_ migrate: set force.
1702 		 */
1703 		*force = true;
1704 		return true;
1705 	}
1706 
1707 	nhge = nh_res_dereference(bucket->nh_entry);
1708 
1709 	/* If the bucket is populated by an underweight or balanced
1710 	 * nexthop, do not migrate.
1711 	 */
1712 	if (!nh_res_nhge_is_ow(nhge))
1713 		return false;
1714 
1715 	/* At this point we know that the bucket is populated with an
1716 	 * overweight nexthop. It needs to be migrated to a new nexthop if
1717 	 * the idle timer or the unbalanced timer expired.
1718 	 */
1719 
1720 	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
1721 	if (time_after_eq(now, idle_point)) {
1722 		/* The bucket is idle. We _can_ migrate: unset force. */
1723 		*force = false;
1724 		return true;
1725 	}
1726 
1727 	/* Unbalanced timer of 0 means "never force". */
1728 	if (res_table->unbalanced_timer) {
1729 		unsigned long unb_point;
1730 
1731 		unb_point = nh_res_table_unb_point(res_table);
1732 		if (time_after(now, unb_point)) {
1733 			/* The bucket is not idle, but the unbalanced timer
1734 			 * expired. We _can_ migrate, but set force anyway,
1735 			 * so that drivers know to ignore activity reports
1736 			 * from the HW.
1737 			 */
1738 			*force = true;
1739 			return true;
1740 		}
1741 
1742 		nh_res_time_set_deadline(unb_point, deadline);
1743 	}
1744 
1745 	nh_res_time_set_deadline(idle_point, deadline);
1746 	return false;
1747 }
1748 
1749 static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
1750 				  u16 bucket_index, bool notify,
1751 				  bool notify_nl, bool force)
1752 {
1753 	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
1754 	struct nh_grp_entry *new_nhge;
1755 	struct netlink_ext_ack extack;
1756 	int err;
1757 
1758 	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
1759 					    struct nh_grp_entry,
1760 					    res.uw_nh_entry);
1761 	if (WARN_ON_ONCE(!new_nhge))
1762 		/* If this function is called, "bucket" is either not
1763 		 * occupied, or it belongs to a next hop that is
1764 		 * overweight. In either case, there ought to be a
1765 		 * corresponding underweight next hop.
1766 		 */
1767 		return false;
1768 
1769 	if (notify) {
1770 		struct nh_grp_entry *old_nhge;
1771 
1772 		old_nhge = nh_res_dereference(bucket->nh_entry);
1773 		err = call_nexthop_res_bucket_notifiers(res_table->net,
1774 							res_table->nhg_id,
1775 							bucket_index, force,
1776 							old_nhge->nh,
1777 							new_nhge->nh, &extack);
1778 		if (err) {
1779 			pr_err_ratelimited("%s\n", extack._msg);
1780 			if (!force)
1781 				return false;
1782 			/* It is not possible to veto a forced replacement, so
1783 			 * just clear the hardware flags from the nexthop
1784 			 * bucket to indicate to user space that this bucket is
1785 			 * not correctly populated in hardware.
1786 			 */
1787 			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
1788 		}
1789 	}
1790 
1791 	nh_res_bucket_set_nh(bucket, new_nhge);
1792 	nh_res_bucket_set_idle(res_table, bucket);
1793 
1794 	if (notify_nl)
1795 		nexthop_bucket_notify(res_table, bucket_index);
1796 
1797 	if (nh_res_nhge_is_balanced(new_nhge))
1798 		list_del(&new_nhge->res.uw_nh_entry);
1799 	return true;
1800 }
1801 
1802 #define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)
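
/* When the table is still unbalanced, nh_res_table_upkeep() below clamps
 * its rescheduling deadline so the delayed work runs no sooner than this
 * interval (half a second) after the current pass.
 */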
1803 
1804 static void nh_res_table_upkeep(struct nh_res_table *res_table,
1805 				bool notify, bool notify_nl)
1806 {
1807 	unsigned long now = jiffies;
1808 	unsigned long deadline;
1809 	u16 i;
1810 
1811 	/* Deadline is the next time that upkeep should be run. It is the
1812 	 * earliest time at which one of the buckets might be migrated.
1813 	 * Start at the most pessimistic estimate: either unbalanced_timer
1814 	 * from now, or if there is none, idle_timer from now. For each
1815 	 * encountered time point, call nh_res_time_set_deadline() to
1816 	 * refine the estimate.
1817 	 */
1818 	if (res_table->unbalanced_timer)
1819 		deadline = now + res_table->unbalanced_timer;
1820 	else
1821 		deadline = now + res_table->idle_timer;
1822 
1823 	for (i = 0; i < res_table->num_nh_buckets; i++) {
1824 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1825 		bool force;
1826 
1827 		if (nh_res_bucket_should_migrate(res_table, bucket,
1828 						 &deadline, &force)) {
1829 			if (!nh_res_bucket_migrate(res_table, i, notify,
1830 						   notify_nl, force)) {
1831 				unsigned long idle_point;
1832 
1833 				/* A driver can override the migration
1834 				 * decision if the HW reports that the
1835 				 * bucket is actually not idle. Therefore
1836 				 * re-mark the bucket as busy and
1837 				 * update the deadline.
1838 				 */
1839 				nh_res_bucket_set_busy(bucket);
1840 				idle_point = nh_res_bucket_idle_point(res_table,
1841 								      bucket,
1842 								      now);
1843 				nh_res_time_set_deadline(idle_point, &deadline);
1844 			}
1845 		}
1846 	}
1847 
1848 	/* If the group is still unbalanced, schedule the next upkeep to
1849 	 * either the deadline computed above, or the minimum deadline,
1850 	 * whichever comes later.
1851 	 */
1852 	if (!nh_res_table_is_balanced(res_table)) {
1853 		unsigned long now = jiffies;
1854 		unsigned long min_deadline;
1855 
1856 		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
1857 		if (time_before(deadline, min_deadline))
1858 			deadline = min_deadline;
1859 
1860 		queue_delayed_work(system_power_efficient_wq,
1861 				   &res_table->upkeep_dw, deadline - now);
1862 	}
1863 }
1864 
1865 static void nh_res_table_upkeep_dw(struct work_struct *work)
1866 {
1867 	struct delayed_work *dw = to_delayed_work(work);
1868 	struct nh_res_table *res_table;
1869 
1870 	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
1871 	nh_res_table_upkeep(res_table, true, true);
1872 }
1873 
1874 static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
1875 {
1876 	cancel_delayed_work_sync(&res_table->upkeep_dw);
1877 }
1878 
1879 static void nh_res_group_rebalance(struct nh_group *nhg,
1880 				   struct nh_res_table *res_table)
1881 {
1882 	int prev_upper_bound = 0;
1883 	int total = 0;
1884 	int w = 0;
1885 	int i;
1886 
1887 	INIT_LIST_HEAD(&res_table->uw_nh_entries);
1888 
1889 	for (i = 0; i < nhg->num_nh; ++i)
1890 		total += nhg->nh_entries[i].weight;
1891 
1892 	for (i = 0; i < nhg->num_nh; ++i) {
1893 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1894 		int upper_bound;
1895 
1896 		w += nhge->weight;
1897 		upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
1898 						total);
1899 		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
1900 		prev_upper_bound = upper_bound;
1901 
1902 		if (nh_res_nhge_is_uw(nhge)) {
1903 			if (list_empty(&res_table->uw_nh_entries))
1904 				res_table->unbalanced_since = jiffies;
1905 			list_add(&nhge->res.uw_nh_entry,
1906 				 &res_table->uw_nh_entries);
1907 		}
1908 	}
1909 }
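
/* Worked example: with 6 buckets and weights 1 and 2 (total 3), the
 * cumulative upper bounds are DIV_ROUND_CLOSEST(6 * 1, 3) = 2 and
 * DIV_ROUND_CLOSEST(6 * 3, 3) = 6, giving wants_buckets of 2 and 4;
 * an entry holding fewer buckets than it wants is underweight.
 */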
1910 
1911 /* Migrate buckets in res_table so that they reference NHGE's from NHG with
1912  * the right NH ID. Set those buckets that do not have a corresponding NHGE
1913  * entry in NHG as not occupied.
1914  */
1915 static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1916 					 struct nh_group *nhg)
1917 {
1918 	u16 i;
1919 
1920 	for (i = 0; i < res_table->num_nh_buckets; i++) {
1921 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1922 		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1923 		bool found = false;
1924 		int j;
1925 
1926 		for (j = 0; j < nhg->num_nh; j++) {
1927 			struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1928 
1929 			if (nhge->nh->id == id) {
1930 				nh_res_bucket_set_nh(bucket, nhge);
1931 				found = true;
1932 				break;
1933 			}
1934 		}
1935 
1936 		if (!found)
1937 			nh_res_bucket_unset_nh(bucket);
1938 	}
1939 }
1940 
1941 static void replace_nexthop_grp_res(struct nh_group *oldg,
1942 				    struct nh_group *newg)
1943 {
1944 	/* For NH group replacement, the new NHG might only have a stub
1945 	 * hash table with 0 buckets, because the number of buckets was not
1946 	 * specified. For NH removal, oldg and newg both reference the same
1947 	 * res_table. So in any case, in the following, we want to work
1948 	 * with oldg->res_table.
1949 	 */
1950 	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1951 	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1952 	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1953 
1954 	nh_res_table_cancel_upkeep(old_res_table);
1955 	nh_res_table_migrate_buckets(old_res_table, newg);
1956 	nh_res_group_rebalance(newg, old_res_table);
1957 	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1958 		old_res_table->unbalanced_since = prev_unbalanced_since;
1959 	nh_res_table_upkeep(old_res_table, true, false);
1960 }
1961 
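/* Recompute the hash-threshold upper bounds after a membership or weight
 * change. Each entry's bound is its cumulative weight fraction scaled to
 * the 31-bit hash space, minus one so that the last entry ends exactly at
 * 2^31 - 1. For example, two entries of weight 1 get the bounds 2^30 - 1
 * and 2^31 - 1, splitting the space evenly between them.
 */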
1962 static void nh_hthr_group_rebalance(struct nh_group *nhg)
1963 {
1964 	int total = 0;
1965 	int w = 0;
1966 	int i;
1967 
1968 	for (i = 0; i < nhg->num_nh; ++i)
1969 		total += nhg->nh_entries[i].weight;
1970 
1971 	for (i = 0; i < nhg->num_nh; ++i) {
1972 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1973 		int upper_bound;
1974 
1975 		w += nhge->weight;
1976 		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
1977 		atomic_set(&nhge->hthr.upper_bound, upper_bound);
1978 	}
1979 }
1980 
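/* Remove one entry from a group by populating the group's preallocated
 * "spare" twin without that entry and publishing the twin via RCU. The two
 * arrays swap roles on every removal, so readers under RCU always see a
 * consistent entry array and no allocation is needed on this cleanup path.
 */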
1981 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
1982 				struct nl_info *nlinfo)
1983 {
1984 	struct nh_grp_entry *nhges, *new_nhges;
1985 	struct nexthop *nhp = nhge->nh_parent;
1986 	struct netlink_ext_ack extack;
1987 	struct nexthop *nh = nhge->nh;
1988 	struct nh_group *nhg, *newg;
1989 	int i, j, err;
1990 
1991 	WARN_ON(!nh);
1992 
1993 	nhg = rtnl_dereference(nhp->nh_grp);
1994 	newg = nhg->spare;
1995 
1996 	/* last entry, keep it visible and remove the parent */
1997 	if (nhg->num_nh == 1) {
1998 		remove_nexthop(net, nhp, nlinfo);
1999 		return;
2000 	}
2001 
2002 	newg->has_v4 = false;
2003 	newg->is_multipath = nhg->is_multipath;
2004 	newg->hash_threshold = nhg->hash_threshold;
2005 	newg->resilient = nhg->resilient;
2006 	newg->fdb_nh = nhg->fdb_nh;
2007 	newg->num_nh = nhg->num_nh;
2008 
2009 	/* copy old entries to new except the one getting removed */
2010 	nhges = nhg->nh_entries;
2011 	new_nhges = newg->nh_entries;
2012 	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
2013 		struct nh_info *nhi;
2014 
2015 		/* current nexthop getting removed */
2016 		if (nhg->nh_entries[i].nh == nh) {
2017 			newg->num_nh--;
2018 			continue;
2019 		}
2020 
2021 		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2022 		if (nhi->family == AF_INET)
2023 			newg->has_v4 = true;
2024 
2025 		list_del(&nhges[i].nh_list);
2026 		new_nhges[j].stats = nhges[i].stats;
2027 		new_nhges[j].nh_parent = nhges[i].nh_parent;
2028 		new_nhges[j].nh = nhges[i].nh;
2029 		new_nhges[j].weight = nhges[i].weight;
2030 		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
2031 		j++;
2032 	}
2033 
2034 	if (newg->hash_threshold)
2035 		nh_hthr_group_rebalance(newg);
2036 	else if (newg->resilient)
2037 		replace_nexthop_grp_res(nhg, newg);
2038 
2039 	rcu_assign_pointer(nhp->nh_grp, newg);
2040 
2041 	list_del(&nhge->nh_list);
2042 	free_percpu(nhge->stats);
2043 	nexthop_put(nhge->nh);
2044 
2045 	/* Removal of an NH from a resilient group is notified through
2046 	 * bucket notifications.
2047 	 */
2048 	if (newg->hash_threshold) {
2049 		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
2050 					     &extack);
2051 		if (err)
2052 			pr_err("%s\n", extack._msg);
2053 	}
2054 
2055 	if (nlinfo)
2056 		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
2057 }
2058 
2059 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
2060 				       struct nl_info *nlinfo)
2061 {
2062 	struct nh_grp_entry *nhge, *tmp;
2063 
2064 	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
2065 		remove_nh_grp_entry(net, nhge, nlinfo);
2066 
2067 	/* make sure all see the newly published array before releasing rtnl */
2068 	synchronize_net();
2069 }
2070 
2071 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
2072 {
2073 	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
2074 	struct nh_res_table *res_table;
2075 	int i, num_nh = nhg->num_nh;
2076 
2077 	for (i = 0; i < num_nh; ++i) {
2078 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2079 
2080 		if (WARN_ON(!nhge->nh))
2081 			continue;
2082 
2083 		list_del_init(&nhge->nh_list);
2084 	}
2085 
2086 	if (nhg->resilient) {
2087 		res_table = rtnl_dereference(nhg->res_table);
2088 		nh_res_table_cancel_upkeep(res_table);
2089 	}
2090 }
2091 
2092 /* not called for nexthop replace */
2093 static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
2094 {
2095 	struct fib6_info *f6i, *tmp;
2096 	bool do_flush = false;
2097 	struct fib_info *fi;
2098 
2099 	list_for_each_entry(fi, &nh->fi_list, nh_list) {
2100 		fi->fib_flags |= RTNH_F_DEAD;
2101 		do_flush = true;
2102 	}
2103 	if (do_flush)
2104 		fib_flush(net);
2105 
2106 	/* ip6_del_rt removes the entry from this list, hence the _safe */
2107 	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
2108 		/* __ip6_del_rt does a release, so do a hold here */
2109 		fib6_info_hold(f6i);
2110 		ipv6_stub->ip6_del_rt(net, f6i,
2111 				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
2112 	}
2113 }
2114 
2115 static void __remove_nexthop(struct net *net, struct nexthop *nh,
2116 			     struct nl_info *nlinfo)
2117 {
2118 	__remove_nexthop_fib(net, nh);
2119 
2120 	if (nh->is_group) {
2121 		remove_nexthop_group(nh, nlinfo);
2122 	} else {
2123 		struct nh_info *nhi;
2124 
2125 		nhi = rtnl_dereference(nh->nh_info);
2126 		if (nhi->fib_nhc.nhc_dev)
2127 			hlist_del(&nhi->dev_hash);
2128 
2129 		remove_nexthop_from_groups(net, nh, nlinfo);
2130 	}
2131 }
2132 
2133 static void remove_nexthop(struct net *net, struct nexthop *nh,
2134 			   struct nl_info *nlinfo)
2135 {
2136 	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
2137 
2138 	/* remove from the tree */
2139 	rb_erase(&nh->rb_node, &net->nexthop.rb_root);
2140 
2141 	if (nlinfo)
2142 		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
2143 
2144 	__remove_nexthop(net, nh, nlinfo);
2145 	nh_base_seq_inc(net);
2146 
2147 	nexthop_put(nh);
2148 }
2149 
2150 /* If any FIB entries reference this nexthop, cached dst entries
2151  * need to be regenerated.
2152  */
2153 static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
2154 			      struct nexthop *replaced_nh)
2155 {
2156 	struct fib6_info *f6i;
2157 	struct nh_group *nhg;
2158 	int i;
2159 
2160 	if (!list_empty(&nh->fi_list))
2161 		rt_cache_flush(net);
2162 
2163 	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2164 		ipv6_stub->fib6_update_sernum(net, f6i);
2165 
2166 	/* if an IPv6 group was replaced, we have to release all old
2167 	 * dsts to make sure all refcounts are released
2168 	 */
2169 	if (!replaced_nh->is_group)
2170 		return;
2171 
2172 	nhg = rtnl_dereference(replaced_nh->nh_grp);
2173 	for (i = 0; i < nhg->num_nh; i++) {
2174 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2175 		struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
2176 
2177 		if (nhi->family == AF_INET6)
2178 			ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
2179 	}
2180 }
2181 
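/* Replace the contents of a group nexthop. For resilient groups, the old
 * group's res_table (and with it all bucket state) is retained and handed
 * over to the new group, while the new group's stub table is parked on the
 * old group so it is disposed of together with it.
 */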
2182 static int replace_nexthop_grp(struct net *net, struct nexthop *old,
2183 			       struct nexthop *new, const struct nh_config *cfg,
2184 			       struct netlink_ext_ack *extack)
2185 {
2186 	struct nh_res_table *tmp_table = NULL;
2187 	struct nh_res_table *new_res_table;
2188 	struct nh_res_table *old_res_table;
2189 	struct nh_group *oldg, *newg;
2190 	int i, err;
2191 
2192 	if (!new->is_group) {
2193 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
2194 		return -EINVAL;
2195 	}
2196 
2197 	oldg = rtnl_dereference(old->nh_grp);
2198 	newg = rtnl_dereference(new->nh_grp);
2199 
2200 	if (newg->hash_threshold != oldg->hash_threshold) {
2201 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
2202 		return -EINVAL;
2203 	}
2204 
2205 	if (newg->hash_threshold) {
2206 		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
2207 					     extack);
2208 		if (err)
2209 			return err;
2210 	} else if (newg->resilient) {
2211 		new_res_table = rtnl_dereference(newg->res_table);
2212 		old_res_table = rtnl_dereference(oldg->res_table);
2213 
2214 		/* Accept if num_nh_buckets was not given, but if it was
2215 		 * given, demand that the value be correct.
2216 		 */
2217 		if (cfg->nh_grp_res_has_num_buckets &&
2218 		    cfg->nh_grp_res_num_buckets !=
2219 		    old_res_table->num_nh_buckets) {
2220 			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
2221 			return -EINVAL;
2222 		}
2223 
2224 		/* Emit a pre-replace notification so that listeners can veto
2225 		 * a potentially unsupported configuration. Otherwise,
2226 		 * individual bucket replacement notifications would need to be
2227 		 * vetoed, which is something that should only happen if the
2228 		 * bucket is currently active.
2229 		 */
2230 		err = call_nexthop_res_table_notifiers(net, new, extack);
2231 		if (err)
2232 			return err;
2233 
2234 		if (cfg->nh_grp_res_has_idle_timer)
2235 			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
2236 		if (cfg->nh_grp_res_has_unbalanced_timer)
2237 			old_res_table->unbalanced_timer =
2238 				cfg->nh_grp_res_unbalanced_timer;
2239 
2240 		replace_nexthop_grp_res(oldg, newg);
2241 
2242 		tmp_table = new_res_table;
2243 		rcu_assign_pointer(newg->res_table, old_res_table);
2244 		rcu_assign_pointer(newg->spare->res_table, old_res_table);
2245 	}
2246 
2247 	/* update parents - used by nexthop code for cleanup */
2248 	for (i = 0; i < newg->num_nh; i++)
2249 		newg->nh_entries[i].nh_parent = old;
2250 
2251 	rcu_assign_pointer(old->nh_grp, newg);
2252 
2253 	/* Make sure concurrent readers are not using 'oldg' anymore. */
2254 	synchronize_net();
2255 
2256 	if (newg->resilient) {
2257 		rcu_assign_pointer(oldg->res_table, tmp_table);
2258 		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
2259 	}
2260 
2261 	for (i = 0; i < oldg->num_nh; i++)
2262 		oldg->nh_entries[i].nh_parent = new;
2263 
2264 	rcu_assign_pointer(new->nh_grp, oldg);
2265 
2266 	return 0;
2267 }
2268 
2269 static void nh_group_v4_update(struct nh_group *nhg)
2270 {
2271 	struct nh_grp_entry *nhges;
2272 	bool has_v4 = false;
2273 	int i;
2274 
2275 	nhges = nhg->nh_entries;
2276 	for (i = 0; i < nhg->num_nh; i++) {
2277 		struct nh_info *nhi;
2278 
2279 		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2280 		if (nhi->family == AF_INET)
2281 			has_v4 = true;
2282 	}
2283 	nhg->has_v4 = has_v4;
2284 }
2285 
2286 static int replace_nexthop_single_notify_res(struct net *net,
2287 					     struct nh_res_table *res_table,
2288 					     struct nexthop *old,
2289 					     struct nh_info *oldi,
2290 					     struct nh_info *newi,
2291 					     struct netlink_ext_ack *extack)
2292 {
2293 	u32 nhg_id = res_table->nhg_id;
2294 	int err;
2295 	u16 i;
2296 
2297 	for (i = 0; i < res_table->num_nh_buckets; i++) {
2298 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2299 		struct nh_grp_entry *nhge;
2300 
2301 		nhge = rtnl_dereference(bucket->nh_entry);
2302 		if (nhge->nh == old) {
2303 			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
2304 								  i, true,
2305 								  oldi, newi,
2306 								  extack);
2307 			if (err)
2308 				goto err_notify;
2309 		}
2310 	}
2311 
2312 	return 0;
2313 
2314 err_notify:
2315 	while (i-- > 0) {
2316 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2317 		struct nh_grp_entry *nhge;
2318 
2319 		nhge = rtnl_dereference(bucket->nh_entry);
2320 		if (nhge->nh == old)
2321 			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
2322 							    true, newi, oldi,
2323 							    extack);
2324 	}
2325 	return err;
2326 }
2327 
2328 static int replace_nexthop_single_notify(struct net *net,
2329 					 struct nexthop *group_nh,
2330 					 struct nexthop *old,
2331 					 struct nh_info *oldi,
2332 					 struct nh_info *newi,
2333 					 struct netlink_ext_ack *extack)
2334 {
2335 	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
2336 	struct nh_res_table *res_table;
2337 
2338 	if (nhg->hash_threshold) {
2339 		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
2340 					      group_nh, extack);
2341 	} else if (nhg->resilient) {
2342 		res_table = rtnl_dereference(nhg->res_table);
2343 		return replace_nexthop_single_notify_res(net, res_table,
2344 							 old, oldi, newi,
2345 							 extack);
2346 	}
2347 
2348 	return -EINVAL;
2349 }
2350 
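/* Replace a single nexthop by swapping the nh_info payloads of 'old' and
 * 'new' rather than swapping the objects themselves: 'old' keeps its ID,
 * refcount and rb-tree position but takes over the new configuration,
 * while 'new' leaves with the old payload and is freed by the caller.
 * If notifying any group that uses the nexthop fails, the swap is undone.
 */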
2351 static int replace_nexthop_single(struct net *net, struct nexthop *old,
2352 				  struct nexthop *new,
2353 				  struct netlink_ext_ack *extack)
2354 {
2355 	u8 old_protocol, old_nh_flags;
2356 	struct nh_info *oldi, *newi;
2357 	struct nh_grp_entry *nhge;
2358 	int err;
2359 
2360 	if (new->is_group) {
2361 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
2362 		return -EINVAL;
2363 	}
2364 
2365 	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
2366 	if (err)
2367 		return err;
2368 
2369 	/* Hardware flags were set on 'old' as 'new' is not in the red-black
2370 	 * tree. Therefore, inherit the flags from 'old' to 'new'.
2371 	 */
2372 	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
2373 
2374 	oldi = rtnl_dereference(old->nh_info);
2375 	newi = rtnl_dereference(new->nh_info);
2376 
2377 	newi->nh_parent = old;
2378 	oldi->nh_parent = new;
2379 
2380 	old_protocol = old->protocol;
2381 	old_nh_flags = old->nh_flags;
2382 
2383 	old->protocol = new->protocol;
2384 	old->nh_flags = new->nh_flags;
2385 
2386 	rcu_assign_pointer(old->nh_info, newi);
2387 	rcu_assign_pointer(new->nh_info, oldi);
2388 
2389 	/* Send a replace notification for all the groups using the nexthop. */
2390 	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2391 		struct nexthop *nhp = nhge->nh_parent;
2392 
2393 		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
2394 						    extack);
2395 		if (err)
2396 			goto err_notify;
2397 	}
2398 
2399 	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
2400 	 * update IPv4 indication in all the groups using the nexthop.
2401 	 */
2402 	if (oldi->family == AF_INET && newi->family == AF_INET6) {
2403 		list_for_each_entry(nhge, &old->grp_list, nh_list) {
2404 			struct nexthop *nhp = nhge->nh_parent;
2405 			struct nh_group *nhg;
2406 
2407 			nhg = rtnl_dereference(nhp->nh_grp);
2408 			nh_group_v4_update(nhg);
2409 		}
2410 	}
2411 
2412 	return 0;
2413 
2414 err_notify:
2415 	rcu_assign_pointer(new->nh_info, newi);
2416 	rcu_assign_pointer(old->nh_info, oldi);
2417 	old->nh_flags = old_nh_flags;
2418 	old->protocol = old_protocol;
2419 	oldi->nh_parent = old;
2420 	newi->nh_parent = new;
2421 	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
2422 		struct nexthop *nhp = nhge->nh_parent;
2423 
2424 		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
2425 	}
2426 	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
2427 	return err;
2428 }
2429 
2430 static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
2431 				     struct nl_info *info)
2432 {
2433 	struct fib6_info *f6i;
2434 
2435 	if (!list_empty(&nh->fi_list)) {
2436 		struct fib_info *fi;
2437 
2438 		/* The expectation is a few fib_info entries per nexthop and
2439 		 * many routes per fib_info, so mark the fib_info entries and
2440 		 * then walk the fib tables once.
2441 		 */
2442 		list_for_each_entry(fi, &nh->fi_list, nh_list)
2443 			fi->nh_updated = true;
2444 
2445 		fib_info_notify_update(net, info);
2446 
2447 		list_for_each_entry(fi, &nh->fi_list, nh_list)
2448 			fi->nh_updated = false;
2449 	}
2450 
2451 	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2452 		ipv6_stub->fib6_rt_update(net, f6i, info);
2453 }
2454 
2455 /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2456  * linked to this nexthop and for all groups that the nexthop
2457  * is a member of
2458  */
2459 static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2460 				   struct nl_info *info)
2461 {
2462 	struct nh_grp_entry *nhge;
2463 
2464 	__nexthop_replace_notify(net, nh, info);
2465 
2466 	list_for_each_entry(nhge, &nh->grp_list, nh_list)
2467 		__nexthop_replace_notify(net, nhge->nh_parent, info);
2468 }
2469 
2470 static int replace_nexthop(struct net *net, struct nexthop *old,
2471 			   struct nexthop *new, const struct nh_config *cfg,
2472 			   struct netlink_ext_ack *extack)
2473 {
2474 	bool new_is_reject = false;
2475 	struct nh_grp_entry *nhge;
2476 	int err;
2477 
2478 	/* check that existing FIB entries are ok with the
2479 	 * new nexthop definition
2480 	 */
2481 	err = fib_check_nh_list(old, new, extack);
2482 	if (err)
2483 		return err;
2484 
2485 	err = fib6_check_nh_list(old, new, extack);
2486 	if (err)
2487 		return err;
2488 
2489 	if (!new->is_group) {
2490 		struct nh_info *nhi = rtnl_dereference(new->nh_info);
2491 
2492 		new_is_reject = nhi->reject_nh;
2493 	}
2494 
2495 	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2496 		/* if new nexthop is a blackhole, any groups using this
2497 		 * nexthop cannot have more than 1 path
2498 		 */
2499 		if (new_is_reject &&
2500 		    nexthop_num_path(nhge->nh_parent) > 1) {
2501 			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2502 			return -EINVAL;
2503 		}
2504 
2505 		err = fib_check_nh_list(nhge->nh_parent, new, extack);
2506 		if (err)
2507 			return err;
2508 
2509 		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2510 		if (err)
2511 			return err;
2512 	}
2513 
2514 	if (old->is_group)
2515 		err = replace_nexthop_grp(net, old, new, cfg, extack);
2516 	else
2517 		err = replace_nexthop_single(net, old, new, extack);
2518 
2519 	if (!err) {
2520 		nh_rt_cache_flush(net, old, new);
2521 
2522 		__remove_nexthop(net, new, NULL);
2523 		nexthop_put(new);
2524 	}
2525 
2526 	return err;
2527 }
2528 
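/* Insert a new nexthop into the per-netns ID-keyed rb-tree, or replace an
 * existing one when NLM_F_REPLACE is set. Roughly, in iproute2 terms (for
 * illustration only):
 *
 *   ip nexthop add id 1 via 192.0.2.1 dev eth0      # NLM_F_CREATE
 *   ip nexthop replace id 1 via 192.0.2.2 dev eth0  # NLM_F_REPLACE
 *
 * NLM_F_REPLACE without NLM_F_CREATE fails with -ENOENT when the ID does
 * not exist, and a duplicate ID without NLM_F_REPLACE fails with -EEXIST.
 */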
2529 /* called with rtnl_lock held */
2530 static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2531 			  struct nh_config *cfg, struct netlink_ext_ack *extack)
2532 {
2533 	struct rb_node **pp, *parent = NULL, *next;
2534 	struct rb_root *root = &net->nexthop.rb_root;
2535 	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2536 	bool create = !!(cfg->nlflags & NLM_F_CREATE);
2537 	u32 new_id = new_nh->id;
2538 	int replace_notify = 0;
2539 	int rc = -EEXIST;
2540 
2541 	pp = &root->rb_node;
2542 	while (1) {
2543 		struct nexthop *nh;
2544 
2545 		next = *pp;
2546 		if (!next)
2547 			break;
2548 
2549 		parent = next;
2550 
2551 		nh = rb_entry(parent, struct nexthop, rb_node);
2552 		if (new_id < nh->id) {
2553 			pp = &next->rb_left;
2554 		} else if (new_id > nh->id) {
2555 			pp = &next->rb_right;
2556 		} else if (replace) {
2557 			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2558 			if (!rc) {
2559 				new_nh = nh; /* send notification with old nh */
2560 				replace_notify = 1;
2561 			}
2562 			goto out;
2563 		} else {
2564 			/* id already exists and not a replace */
2565 			goto out;
2566 		}
2567 	}
2568 
2569 	if (replace && !create) {
2570 		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2571 		rc = -ENOENT;
2572 		goto out;
2573 	}
2574 
2575 	if (new_nh->is_group) {
2576 		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2577 		struct nh_res_table *res_table;
2578 
2579 		if (nhg->resilient) {
2580 			res_table = rtnl_dereference(nhg->res_table);
2581 
2582 			/* Not passing the number of buckets is OK when
2583 			 * replacing, but not when creating a new group.
2584 			 */
2585 			if (!cfg->nh_grp_res_has_num_buckets) {
2586 				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2587 				rc = -EINVAL;
2588 				goto out;
2589 			}
2590 
2591 			nh_res_group_rebalance(nhg, res_table);
2592 
2593 			/* Do not send bucket notifications; a full
2594 			 * notification is sent below.
2595 			 */
2596 			nh_res_table_upkeep(res_table, false, false);
2597 		}
2598 	}
2599 
2600 	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2601 	rb_insert_color(&new_nh->rb_node, root);
2602 
2603 	/* The initial insertion is a full notification for hash-threshold as
2604 	 * well as resilient groups.
2605 	 */
2606 	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2607 	if (rc)
2608 		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2609 
2610 out:
2611 	if (!rc) {
2612 		nh_base_seq_inc(net);
2613 		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2614 		if (replace_notify &&
2615 		    READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
2616 			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2617 	}
2618 
2619 	return rc;
2620 }
2621 
2622 /* rtnl */
2623 /* remove all nexthops tied to a device being deleted */
2624 static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2625 {
2626 	unsigned int hash = nh_dev_hashfn(dev->ifindex);
2627 	struct net *net = dev_net(dev);
2628 	struct hlist_head *head = &net->nexthop.devhash[hash];
2629 	struct hlist_node *n;
2630 	struct nh_info *nhi;
2631 
2632 	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2633 		if (nhi->fib_nhc.nhc_dev != dev)
2634 			continue;
2635 
2636 		if (nhi->reject_nh &&
2637 		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2638 			continue;
2639 
2640 		remove_nexthop(net, nhi->nh_parent, NULL);
2641 	}
2642 }
2643 
2644 /* rtnl; called when net namespace is deleted */
2645 static void flush_all_nexthops(struct net *net)
2646 {
2647 	struct rb_root *root = &net->nexthop.rb_root;
2648 	struct rb_node *node;
2649 	struct nexthop *nh;
2650 
2651 	while ((node = rb_first(root))) {
2652 		nh = rb_entry(node, struct nexthop, rb_node);
2653 		remove_nexthop(net, nh, NULL);
2654 		cond_resched();
2655 	}
2656 }
2657 
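/* Build a group nexthop from the NHA_GROUP attribute. Note that the wire
 * format carries weight - 1 in each struct nexthop_grp entry (a zero on
 * the wire means weight 1), hence the "+ 1" when entries are copied below.
 * A resilient group, in iproute2 terms (for illustration only):
 *
 *   ip nexthop add id 10 group 1/2 type resilient buckets 32
 */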
2658 static struct nexthop *nexthop_create_group(struct net *net,
2659 					    struct nh_config *cfg)
2660 {
2661 	struct nlattr *grps_attr = cfg->nh_grp;
2662 	struct nexthop_grp *entry = nla_data(grps_attr);
2663 	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2664 	struct nh_group *nhg;
2665 	struct nexthop *nh;
2666 	int err;
2667 	int i;
2668 
2669 	if (WARN_ON(!num_nh))
2670 		return ERR_PTR(-EINVAL);
2671 
2672 	nh = nexthop_alloc();
2673 	if (!nh)
2674 		return ERR_PTR(-ENOMEM);
2675 
2676 	nh->is_group = 1;
2677 
2678 	nhg = nexthop_grp_alloc(num_nh);
2679 	if (!nhg) {
2680 		kfree(nh);
2681 		return ERR_PTR(-ENOMEM);
2682 	}
2683 
2684 	/* spare group used for removals */
2685 	nhg->spare = nexthop_grp_alloc(num_nh);
2686 	if (!nhg->spare) {
2687 		kfree(nhg);
2688 		kfree(nh);
2689 		return ERR_PTR(-ENOMEM);
2690 	}
2691 	nhg->spare->spare = nhg;
2692 
2693 	for (i = 0; i < nhg->num_nh; ++i) {
2694 		struct nexthop *nhe;
2695 		struct nh_info *nhi;
2696 
2697 		nhe = nexthop_find_by_id(net, entry[i].id);
2698 		if (!nexthop_get(nhe)) {
2699 			err = -ENOENT;
2700 			goto out_no_nh;
2701 		}
2702 
2703 		nhi = rtnl_dereference(nhe->nh_info);
2704 		if (nhi->family == AF_INET)
2705 			nhg->has_v4 = true;
2706 
2707 		nhg->nh_entries[i].stats =
2708 			netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
2709 		if (!nhg->nh_entries[i].stats) {
2710 			err = -ENOMEM;
2711 			nexthop_put(nhe);
2712 			goto out_no_nh;
2713 		}
2714 		nhg->nh_entries[i].nh = nhe;
2715 		nhg->nh_entries[i].weight = entry[i].weight + 1;
2716 		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2717 		nhg->nh_entries[i].nh_parent = nh;
2718 	}
2719 
2720 	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2721 		nhg->hash_threshold = 1;
2722 		nhg->is_multipath = true;
2723 	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2724 		struct nh_res_table *res_table;
2725 
2726 		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2727 		if (!res_table) {
2728 			err = -ENOMEM;
2729 			goto out_no_nh;
2730 		}
2731 
2732 		rcu_assign_pointer(nhg->spare->res_table, res_table);
2733 		rcu_assign_pointer(nhg->res_table, res_table);
2734 		nhg->resilient = true;
2735 		nhg->is_multipath = true;
2736 	}
2737 
2738 	WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);
2739 
2740 	if (nhg->hash_threshold)
2741 		nh_hthr_group_rebalance(nhg);
2742 
2743 	if (cfg->nh_fdb)
2744 		nhg->fdb_nh = 1;
2745 
2746 	if (cfg->nh_hw_stats)
2747 		nhg->hw_stats = true;
2748 
2749 	rcu_assign_pointer(nh->nh_grp, nhg);
2750 
2751 	return nh;
2752 
2753 out_no_nh:
2754 	for (i--; i >= 0; --i) {
2755 		list_del(&nhg->nh_entries[i].nh_list);
2756 		free_percpu(nhg->nh_entries[i].stats);
2757 		nexthop_put(nhg->nh_entries[i].nh);
2758 	}
2759 
2760 	kfree(nhg->spare);
2761 	kfree(nhg);
2762 	kfree(nh);
2763 
2764 	return ERR_PTR(err);
2765 }
2766 
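/* Initialize the embedded fib_nh from the nexthop config. FDB nexthops
 * carry only a gateway, so device validation via fib_check_nh() is skipped
 * for them; for everything else a successful check also binds the nexthop
 * to its device and updates the cached source address.
 */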
2767 static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2768 			  struct nh_info *nhi, struct nh_config *cfg,
2769 			  struct netlink_ext_ack *extack)
2770 {
2771 	struct fib_nh *fib_nh = &nhi->fib_nh;
2772 	struct fib_config fib_cfg = {
2773 		.fc_oif   = cfg->nh_ifindex,
2774 		.fc_gw4   = cfg->gw.ipv4,
2775 		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2776 		.fc_flags = cfg->nh_flags,
2777 		.fc_nlinfo = cfg->nlinfo,
2778 		.fc_encap = cfg->nh_encap,
2779 		.fc_encap_type = cfg->nh_encap_type,
2780 	};
2781 	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2782 	int err;
2783 
2784 	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2785 	if (err) {
2786 		fib_nh_release(net, fib_nh);
2787 		goto out;
2788 	}
2789 
2790 	if (nhi->fdb_nh)
2791 		goto out;
2792 
2793 	/* sets nh_dev if successful */
2794 	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2795 	if (!err) {
2796 		nh->nh_flags = fib_nh->fib_nh_flags;
2797 		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2798 					  !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
2799 	} else {
2800 		fib_nh_release(net, fib_nh);
2801 	}
2802 out:
2803 	return err;
2804 }
2805 
2806 static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
2807 			  struct nh_info *nhi, struct nh_config *cfg,
2808 			  struct netlink_ext_ack *extack)
2809 {
2810 	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2811 	struct fib6_config fib6_cfg = {
2812 		.fc_table = l3mdev_fib_table(cfg->dev),
2813 		.fc_ifindex = cfg->nh_ifindex,
2814 		.fc_gateway = cfg->gw.ipv6,
2815 		.fc_flags = cfg->nh_flags,
2816 		.fc_nlinfo = cfg->nlinfo,
2817 		.fc_encap = cfg->nh_encap,
2818 		.fc_encap_type = cfg->nh_encap_type,
2819 		.fc_is_fdb = cfg->nh_fdb,
2820 	};
2821 	int err;
2822 
2823 	if (!ipv6_addr_any(&cfg->gw.ipv6))
2824 		fib6_cfg.fc_flags |= RTF_GATEWAY;
2825 
2826 	/* sets nh_dev if successful */
2827 	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2828 				      extack);
2829 	if (err) {
2830 		/* IPv6 is not enabled; don't call fib6_nh_release */
2831 		if (err == -EAFNOSUPPORT)
2832 			goto out;
2833 		ipv6_stub->fib6_nh_release(fib6_nh);
2834 	} else {
2835 		nh->nh_flags = fib6_nh->fib_nh_flags;
2836 	}
2837 out:
2838 	return err;
2839 }
2840 
2841 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2842 				      struct netlink_ext_ack *extack)
2843 {
2844 	struct nh_info *nhi;
2845 	struct nexthop *nh;
2846 	int err = 0;
2847 
2848 	nh = nexthop_alloc();
2849 	if (!nh)
2850 		return ERR_PTR(-ENOMEM);
2851 
2852 	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2853 	if (!nhi) {
2854 		kfree(nh);
2855 		return ERR_PTR(-ENOMEM);
2856 	}
2857 
2858 	nh->nh_flags = cfg->nh_flags;
2859 	nh->net = net;
2860 
2861 	nhi->nh_parent = nh;
2862 	nhi->family = cfg->nh_family;
2863 	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2864 
2865 	if (cfg->nh_fdb)
2866 		nhi->fdb_nh = 1;
2867 
2868 	if (cfg->nh_blackhole) {
2869 		nhi->reject_nh = 1;
2870 		cfg->nh_ifindex = net->loopback_dev->ifindex;
2871 	}
2872 
2873 	switch (cfg->nh_family) {
2874 	case AF_INET:
2875 		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2876 		break;
2877 	case AF_INET6:
2878 		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2879 		break;
2880 	}
2881 
2882 	if (err) {
2883 		kfree(nhi);
2884 		kfree(nh);
2885 		return ERR_PTR(err);
2886 	}
2887 
2888 	/* add the entry to the device-based hash */
2889 	if (!nhi->fdb_nh)
2890 		nexthop_devhash_add(net, nhi);
2891 
2892 	rcu_assign_pointer(nh->nh_info, nhi);
2893 
2894 	return nh;
2895 }
2896 
2897 /* called with rtnl lock held */
2898 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2899 				   struct netlink_ext_ack *extack)
2900 {
2901 	struct nexthop *nh;
2902 	int err;
2903 
2904 	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
2905 		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
2906 		return ERR_PTR(-EINVAL);
2907 	}
2908 
2909 	if (!cfg->nh_id) {
2910 		cfg->nh_id = nh_find_unused_id(net);
2911 		if (!cfg->nh_id) {
2912 			NL_SET_ERR_MSG(extack, "No unused id");
2913 			return ERR_PTR(-EINVAL);
2914 		}
2915 	}
2916 
2917 	if (cfg->nh_grp)
2918 		nh = nexthop_create_group(net, cfg);
2919 	else
2920 		nh = nexthop_create(net, cfg, extack);
2921 
2922 	if (IS_ERR(nh))
2923 		return nh;
2924 
2925 	refcount_set(&nh->refcnt, 1);
2926 	nh->id = cfg->nh_id;
2927 	nh->protocol = cfg->nh_protocol;
2928 	nh->net = net;
2929 
2930 	err = insert_nexthop(net, nh, cfg, extack);
2931 	if (err) {
2932 		__remove_nexthop(net, nh, NULL);
2933 		nexthop_put(nh);
2934 		nh = ERR_PTR(err);
2935 	}
2936 
2937 	return nh;
2938 }
2939 
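/* Parse an optional timer attribute for a resilient group. Userspace
 * expresses timers in clock_t units (hundredths of a second), converted to
 * jiffies here; clock_t_to_jiffies() returns ~0UL on overflow. When the
 * attribute is absent, @fallback is used and *has_p records that the value
 * was defaulted rather than supplied.
 */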
2940 static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
2941 			    unsigned long *timer_p, bool *has_p,
2942 			    struct netlink_ext_ack *extack)
2943 {
2944 	unsigned long timer;
2945 	u32 value;
2946 
2947 	if (!attr) {
2948 		*timer_p = fallback;
2949 		*has_p = false;
2950 		return 0;
2951 	}
2952 
2953 	value = nla_get_u32(attr);
2954 	timer = clock_t_to_jiffies(value);
2955 	if (timer == ~0UL) {
2956 		NL_SET_ERR_MSG(extack, "Timer value too large");
2957 		return -EINVAL;
2958 	}
2959 
2960 	*timer_p = timer;
2961 	*has_p = true;
2962 	return 0;
2963 }
2964 
2965 static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
2966 				    struct netlink_ext_ack *extack)
2967 {
2968 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
2969 	int err;
2970 
2971 	if (res) {
2972 		err = nla_parse_nested(tb,
2973 				       ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
2974 				       res, rtm_nh_res_policy_new, extack);
2975 		if (err < 0)
2976 			return err;
2977 	}
2978 
2979 	if (tb[NHA_RES_GROUP_BUCKETS]) {
2980 		cfg->nh_grp_res_num_buckets =
2981 			nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
2982 		cfg->nh_grp_res_has_num_buckets = true;
2983 		if (!cfg->nh_grp_res_num_buckets) {
2984 			NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
2985 			return -EINVAL;
2986 		}
2987 	}
2988 
2989 	err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
2990 			       NH_RES_DEFAULT_IDLE_TIMER,
2991 			       &cfg->nh_grp_res_idle_timer,
2992 			       &cfg->nh_grp_res_has_idle_timer,
2993 			       extack);
2994 	if (err)
2995 		return err;
2996 
2997 	return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
2998 				NH_RES_DEFAULT_UNBALANCED_TIMER,
2999 				&cfg->nh_grp_res_unbalanced_timer,
3000 				&cfg->nh_grp_res_has_unbalanced_timer,
3001 				extack);
3002 }
3003 
3004 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
3005 			    struct nlmsghdr *nlh, struct nh_config *cfg,
3006 			    struct netlink_ext_ack *extack)
3007 {
3008 	struct nhmsg *nhm = nlmsg_data(nlh);
3009 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
3010 	int err;
3011 
3012 	err = nlmsg_parse(nlh, sizeof(*nhm), tb,
3013 			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
3014 			  rtm_nh_policy_new, extack);
3015 	if (err < 0)
3016 		return err;
3017 
3018 	err = -EINVAL;
3019 	if (nhm->resvd || nhm->nh_scope) {
3020 		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
3021 		goto out;
3022 	}
3023 	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
3024 		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
3025 		goto out;
3026 	}
3027 
3028 	switch (nhm->nh_family) {
3029 	case AF_INET:
3030 	case AF_INET6:
3031 		break;
3032 	case AF_UNSPEC:
3033 		if (tb[NHA_GROUP])
3034 			break;
3035 		fallthrough;
3036 	default:
3037 		NL_SET_ERR_MSG(extack, "Invalid address family");
3038 		goto out;
3039 	}
3040 
3041 	memset(cfg, 0, sizeof(*cfg));
3042 	cfg->nlflags = nlh->nlmsg_flags;
3043 	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
3044 	cfg->nlinfo.nlh = nlh;
3045 	cfg->nlinfo.nl_net = net;
3046 
3047 	cfg->nh_family = nhm->nh_family;
3048 	cfg->nh_protocol = nhm->nh_protocol;
3049 	cfg->nh_flags = nhm->nh_flags;
3050 
3051 	if (tb[NHA_ID])
3052 		cfg->nh_id = nla_get_u32(tb[NHA_ID]);
3053 
3054 	if (tb[NHA_FDB]) {
3055 		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
3056 		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
3057 			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
3058 			goto out;
3059 		}
3060 		if (nhm->nh_flags) {
3061 			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
3062 			goto out;
3063 		}
3064 		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
3065 	}
3066 
3067 	if (tb[NHA_GROUP]) {
3068 		if (nhm->nh_family != AF_UNSPEC) {
3069 			NL_SET_ERR_MSG(extack, "Invalid family for group");
3070 			goto out;
3071 		}
3072 		cfg->nh_grp = tb[NHA_GROUP];
3073 
3074 		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
3075 		if (tb[NHA_GROUP_TYPE])
3076 			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
3077 
3078 		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
3079 			NL_SET_ERR_MSG(extack, "Invalid group type");
3080 			goto out;
3081 		}
3082 		err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
3083 					  cfg->nh_grp_type, extack);
3084 		if (err)
3085 			goto out;
3086 
3087 		if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
3088 			err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
3089 						       cfg, extack);
3090 
3091 		if (tb[NHA_HW_STATS_ENABLE])
3092 			cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
3093 
3094 		/* no other attributes should be set */
3095 		goto out;
3096 	}
3097 
3098 	if (tb[NHA_BLACKHOLE]) {
3099 		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
3100 		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
3101 			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
3102 			goto out;
3103 		}
3104 
3105 		cfg->nh_blackhole = 1;
3106 		err = 0;
3107 		goto out;
3108 	}
3109 
3110 	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
3111 		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
3112 		goto out;
3113 	}
3114 
3115 	if (!cfg->nh_fdb && tb[NHA_OIF]) {
3116 		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
3117 		if (cfg->nh_ifindex)
3118 			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
3119 
3120 		if (!cfg->dev) {
3121 			NL_SET_ERR_MSG(extack, "Invalid device index");
3122 			goto out;
3123 		} else if (!(cfg->dev->flags & IFF_UP)) {
3124 			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3125 			err = -ENETDOWN;
3126 			goto out;
3127 		} else if (!netif_carrier_ok(cfg->dev)) {
3128 			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
3129 			err = -ENETDOWN;
3130 			goto out;
3131 		}
3132 	}
3133 
3134 	err = -EINVAL;
3135 	if (tb[NHA_GATEWAY]) {
3136 		struct nlattr *gwa = tb[NHA_GATEWAY];
3137 
3138 		switch (cfg->nh_family) {
3139 		case AF_INET:
3140 			if (nla_len(gwa) != sizeof(u32)) {
3141 				NL_SET_ERR_MSG(extack, "Invalid gateway");
3142 				goto out;
3143 			}
3144 			cfg->gw.ipv4 = nla_get_be32(gwa);
3145 			break;
3146 		case AF_INET6:
3147 			if (nla_len(gwa) != sizeof(struct in6_addr)) {
3148 				NL_SET_ERR_MSG(extack, "Invalid gateway");
3149 				goto out;
3150 			}
3151 			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
3152 			break;
3153 		default:
3154 			NL_SET_ERR_MSG(extack,
3155 				       "Unknown address family for gateway");
3156 			goto out;
3157 		}
3158 	} else {
3159 		/* device-only nexthop (no gateway) */
3160 		if (cfg->nh_flags & RTNH_F_ONLINK) {
3161 			NL_SET_ERR_MSG(extack,
3162 				       "ONLINK flag can not be set for nexthop without a gateway");
3163 			goto out;
3164 		}
3165 	}
3166 
3167 	if (tb[NHA_ENCAP]) {
3168 		cfg->nh_encap = tb[NHA_ENCAP];
3169 
3170 		if (!tb[NHA_ENCAP_TYPE]) {
3171 			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
3172 			goto out;
3173 		}
3174 
3175 		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
3176 		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
3177 		if (err < 0)
3178 			goto out;
3179 
3180 	} else if (tb[NHA_ENCAP_TYPE]) {
3181 		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
3182 		goto out;
3183 	}
3184 
3185 	if (tb[NHA_HW_STATS_ENABLE]) {
3186 		NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
3187 		goto out;
3188 	}
3189 
3190 	err = 0;
3191 out:
3192 	return err;
3193 }
3194 
3195 /* rtnl */
3196 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3197 			   struct netlink_ext_ack *extack)
3198 {
3199 	struct net *net = sock_net(skb->sk);
3200 	struct nh_config cfg;
3201 	struct nexthop *nh;
3202 	int err;
3203 
3204 	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
3205 	if (!err) {
3206 		nh = nexthop_add(net, &cfg, extack);
3207 		if (IS_ERR(nh))
3208 			err = PTR_ERR(nh);
3209 	}
3210 
3211 	return err;
3212 }
3213 
3214 static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
3215 				struct nlattr **tb, u32 *id, u32 *op_flags,
3216 				struct netlink_ext_ack *extack)
3217 {
3218 	struct nhmsg *nhm = nlmsg_data(nlh);
3219 
3220 	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3221 		NL_SET_ERR_MSG(extack, "Invalid values in header");
3222 		return -EINVAL;
3223 	}
3224 
3225 	if (!tb[NHA_ID]) {
3226 		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
3227 		return -EINVAL;
3228 	}
3229 
3230 	*id = nla_get_u32(tb[NHA_ID]);
3231 	if (!(*id)) {
3232 		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3233 		return -EINVAL;
3234 	}
3235 
3236 	if (op_flags) {
3237 		if (tb[NHA_OP_FLAGS])
3238 			*op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
3239 		else
3240 			*op_flags = 0;
3241 	}
3242 
3243 	return 0;
3244 }
3245 
3246 /* rtnl */
3247 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3248 			   struct netlink_ext_ack *extack)
3249 {
3250 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
3251 	struct net *net = sock_net(skb->sk);
3252 	struct nl_info nlinfo = {
3253 		.nlh = nlh,
3254 		.nl_net = net,
3255 		.portid = NETLINK_CB(skb).portid,
3256 	};
3257 	struct nexthop *nh;
3258 	int err;
3259 	u32 id;
3260 
3261 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3262 			  ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
3263 			  extack);
3264 	if (err < 0)
3265 		return err;
3266 
3267 	err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
3268 	if (err)
3269 		return err;
3270 
3271 	nh = nexthop_find_by_id(net, id);
3272 	if (!nh)
3273 		return -ENOENT;
3274 
3275 	remove_nexthop(net, nh, &nlinfo);
3276 
3277 	return 0;
3278 }
3279 
3280 /* rtnl */
3281 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3282 			   struct netlink_ext_ack *extack)
3283 {
3284 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
3285 	struct net *net = sock_net(in_skb->sk);
3286 	struct sk_buff *skb = NULL;
3287 	struct nexthop *nh;
3288 	u32 op_flags;
3289 	int err;
3290 	u32 id;
3291 
3292 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3293 			  ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
3294 			  extack);
3295 	if (err < 0)
3296 		return err;
3297 
3298 	err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
3299 	if (err)
3300 		return err;
3301 
3302 	err = -ENOBUFS;
3303 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3304 	if (!skb)
3305 		goto out;
3306 
3307 	err = -ENOENT;
3308 	nh = nexthop_find_by_id(net, id);
3309 	if (!nh)
3310 		goto errout_free;
3311 
3312 	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
3313 			   nlh->nlmsg_seq, 0, op_flags);
3314 	if (err < 0) {
3315 		WARN_ON(err == -EMSGSIZE);
3316 		goto errout_free;
3317 	}
3318 
3319 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3320 out:
3321 	return err;
3322 errout_free:
3323 	kfree_skb(skb);
3324 	goto out;
3325 }
3326 
3327 struct nh_dump_filter {
3328 	u32 nh_id;
3329 	int dev_idx;
3330 	int master_idx;
3331 	bool group_filter;
3332 	bool fdb_filter;
3333 	u32 res_bucket_nh_id;
3334 	u32 op_flags;
3335 };
3336 
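/* Decide whether @nh is excluded by the dump filter. Device, master and
 * family filters implicitly exclude group nexthops, since those properties
 * belong to the individual member nexthops rather than to the group
 * itself.
 */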
3337 static bool nh_dump_filtered(struct nexthop *nh,
3338 			     struct nh_dump_filter *filter, u8 family)
3339 {
3340 	const struct net_device *dev;
3341 	const struct nh_info *nhi;
3342 
3343 	if (filter->group_filter && !nh->is_group)
3344 		return true;
3345 
3346 	if (!filter->dev_idx && !filter->master_idx && !family)
3347 		return false;
3348 
3349 	if (nh->is_group)
3350 		return true;
3351 
3352 	nhi = rtnl_dereference(nh->nh_info);
3353 	if (family && nhi->family != family)
3354 		return true;
3355 
3356 	dev = nhi->fib_nhc.nhc_dev;
3357 	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
3358 		return true;
3359 
3360 	if (filter->master_idx) {
3361 		struct net_device *master;
3362 
3363 		if (!dev)
3364 			return true;
3365 
3366 		master = netdev_master_upper_dev_get((struct net_device *)dev);
3367 		if (!master || master->ifindex != filter->master_idx)
3368 			return true;
3369 	}
3370 
3371 	return false;
3372 }
3373 
3374 static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
3375 			       struct nh_dump_filter *filter,
3376 			       struct netlink_ext_ack *extack)
3377 {
3378 	struct nhmsg *nhm;
3379 	u32 idx;
3380 
3381 	if (tb[NHA_OIF]) {
3382 		idx = nla_get_u32(tb[NHA_OIF]);
3383 		if (idx > INT_MAX) {
3384 			NL_SET_ERR_MSG(extack, "Invalid device index");
3385 			return -EINVAL;
3386 		}
3387 		filter->dev_idx = idx;
3388 	}
3389 	if (tb[NHA_MASTER]) {
3390 		idx = nla_get_u32(tb[NHA_MASTER]);
3391 		if (idx > INT_MAX) {
3392 			NL_SET_ERR_MSG(extack, "Invalid master device index");
3393 			return -EINVAL;
3394 		}
3395 		filter->master_idx = idx;
3396 	}
3397 	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
3398 	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
3399 
3400 	nhm = nlmsg_data(nlh);
3401 	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3402 		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
3403 		return -EINVAL;
3404 	}
3405 
3406 	return 0;
3407 }
3408 
3409 static int nh_valid_dump_req(const struct nlmsghdr *nlh,
3410 			     struct nh_dump_filter *filter,
3411 			     struct netlink_callback *cb)
3412 {
3413 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
3414 	int err;
3415 
3416 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3417 			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
3418 			  rtm_nh_policy_dump, cb->extack);
3419 	if (err < 0)
3420 		return err;
3421 
3422 	if (tb[NHA_OP_FLAGS])
3423 		filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
3424 	else
3425 		filter->op_flags = 0;
3426 
3427 	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3428 }
3429 
3430 struct rtm_dump_nh_ctx {
3431 	u32 idx;
3432 };
3433 
3434 static struct rtm_dump_nh_ctx *
3435 rtm_dump_nh_ctx(struct netlink_callback *cb)
3436 {
3437 	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
3438 
3439 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3440 	return ctx;
3441 }
3442 
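/* Walk the ID-ordered rb-tree, invoking @nh_cb on every nexthop whose ID
 * is at least the one saved in @ctx. Dumps resume by ID rather than by
 * position, so when a fill callback runs out of skb space, the next dump
 * chunk retries starting from the nexthop that did not fit.
 */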
3443 static int rtm_dump_walk_nexthops(struct sk_buff *skb,
3444 				  struct netlink_callback *cb,
3445 				  struct rb_root *root,
3446 				  struct rtm_dump_nh_ctx *ctx,
3447 				  int (*nh_cb)(struct sk_buff *skb,
3448 					       struct netlink_callback *cb,
3449 					       struct nexthop *nh, void *data),
3450 				  void *data)
3451 {
3452 	struct rb_node *node;
3453 	int s_idx;
3454 	int err;
3455 
3456 	s_idx = ctx->idx;
3457 	for (node = rb_first(root); node; node = rb_next(node)) {
3458 		struct nexthop *nh;
3459 
3460 		nh = rb_entry(node, struct nexthop, rb_node);
3461 		if (nh->id < s_idx)
3462 			continue;
3463 
3464 		ctx->idx = nh->id;
3465 		err = nh_cb(skb, cb, nh, data);
3466 		if (err)
3467 			return err;
3468 	}
3469 
3470 	return 0;
3471 }
3472 
3473 static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
3474 			       struct nexthop *nh, void *data)
3475 {
3476 	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3477 	struct nh_dump_filter *filter = data;
3478 
3479 	if (nh_dump_filtered(nh, filter, nhm->nh_family))
3480 		return 0;
3481 
3482 	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
3483 			    NETLINK_CB(cb->skb).portid,
3484 			    cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
3485 }
3486 
3487 /* rtnl */
3488 static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
3489 {
3490 	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
3491 	struct net *net = sock_net(skb->sk);
3492 	struct rb_root *root = &net->nexthop.rb_root;
3493 	struct nh_dump_filter filter = {};
3494 	int err;
3495 
3496 	err = nh_valid_dump_req(cb->nlh, &filter, cb);
3497 	if (err < 0)
3498 		return err;
3499 
3500 	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
3501 				     &rtm_dump_nexthop_cb, &filter);
3502 
3503 	cb->seq = net->nexthop.seq;
3504 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3505 	return err;
3506 }
3507 
3508 static struct nexthop *
3509 nexthop_find_group_resilient(struct net *net, u32 id,
3510 			     struct netlink_ext_ack *extack)
3511 {
3512 	struct nh_group *nhg;
3513 	struct nexthop *nh;
3514 
3515 	nh = nexthop_find_by_id(net, id);
3516 	if (!nh)
3517 		return ERR_PTR(-ENOENT);
3518 
3519 	if (!nh->is_group) {
3520 		NL_SET_ERR_MSG(extack, "Not a nexthop group");
3521 		return ERR_PTR(-EINVAL);
3522 	}
3523 
3524 	nhg = rtnl_dereference(nh->nh_grp);
3525 	if (!nhg->resilient) {
3526 		NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
3527 		return ERR_PTR(-EINVAL);
3528 	}
3529 
3530 	return nh;
3531 }
3532 
3533 static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
3534 			      struct netlink_ext_ack *extack)
3535 {
3536 	u32 idx;
3537 
3538 	if (attr) {
3539 		idx = nla_get_u32(attr);
3540 		if (!idx) {
3541 			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3542 			return -EINVAL;
3543 		}
3544 		*nh_id_p = idx;
3545 	} else {
3546 		*nh_id_p = 0;
3547 	}
3548 
3549 	return 0;
3550 }
3551 
3552 static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
3553 				    struct nh_dump_filter *filter,
3554 				    struct netlink_callback *cb)
3555 {
3556 	struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
3557 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
3558 	int err;
3559 
3560 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3561 			  ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
3562 			  rtm_nh_policy_dump_bucket, NULL);
3563 	if (err < 0)
3564 		return err;
3565 
3566 	err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
3567 	if (err)
3568 		return err;
3569 
3570 	if (tb[NHA_RES_BUCKET]) {
3571 		size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;
3572 
3573 		err = nla_parse_nested(res_tb, max,
3574 				       tb[NHA_RES_BUCKET],
3575 				       rtm_nh_res_bucket_policy_dump,
3576 				       cb->extack);
3577 		if (err < 0)
3578 			return err;
3579 
3580 		err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
3581 					 &filter->res_bucket_nh_id,
3582 					 cb->extack);
3583 		if (err)
3584 			return err;
3585 	}
3586 
3587 	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3588 }
3589 
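/* Resuming a bucket dump needs two coordinates: the nexthop ID reached so
 * far and the bucket index within that group, because a single group's
 * buckets may span several dump messages.
 */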
3590 struct rtm_dump_res_bucket_ctx {
3591 	struct rtm_dump_nh_ctx nh;
3592 	u16 bucket_index;
3593 };
3594 
3595 static struct rtm_dump_res_bucket_ctx *
3596 rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
3597 {
3598 	struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
3599 
3600 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3601 	return ctx;
3602 }
3603 
3604 struct rtm_dump_nexthop_bucket_data {
3605 	struct rtm_dump_res_bucket_ctx *ctx;
3606 	struct nh_dump_filter filter;
3607 };
3608 
3609 static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
3610 				      struct netlink_callback *cb,
3611 				      struct nexthop *nh,
3612 				      struct rtm_dump_nexthop_bucket_data *dd)
3613 {
3614 	u32 portid = NETLINK_CB(cb->skb).portid;
3615 	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3616 	struct nh_res_table *res_table;
3617 	struct nh_group *nhg;
3618 	u16 bucket_index;
3619 	int err;
3620 
3621 	nhg = rtnl_dereference(nh->nh_grp);
3622 	res_table = rtnl_dereference(nhg->res_table);
3623 	for (bucket_index = dd->ctx->bucket_index;
3624 	     bucket_index < res_table->num_nh_buckets;
3625 	     bucket_index++) {
3626 		struct nh_res_bucket *bucket;
3627 		struct nh_grp_entry *nhge;
3628 
3629 		bucket = &res_table->nh_buckets[bucket_index];
3630 		nhge = rtnl_dereference(bucket->nh_entry);
3631 		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
3632 			continue;
3633 
3634 		if (dd->filter.res_bucket_nh_id &&
3635 		    dd->filter.res_bucket_nh_id != nhge->nh->id)
3636 			continue;
3637 
3638 		dd->ctx->bucket_index = bucket_index;
3639 		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
3640 					 RTM_NEWNEXTHOPBUCKET, portid,
3641 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3642 					 cb->extack);
3643 		if (err)
3644 			return err;
3645 	}
3646 
3647 	dd->ctx->bucket_index = 0;
3648 
3649 	return 0;
3650 }
3651 
3652 static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
3653 				      struct netlink_callback *cb,
3654 				      struct nexthop *nh, void *data)
3655 {
3656 	struct rtm_dump_nexthop_bucket_data *dd = data;
3657 	struct nh_group *nhg;
3658 
3659 	if (!nh->is_group)
3660 		return 0;
3661 
3662 	nhg = rtnl_dereference(nh->nh_grp);
3663 	if (!nhg->resilient)
3664 		return 0;
3665 
3666 	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
3667 }
3668 
3669 /* rtnl */
3670 static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
3671 				   struct netlink_callback *cb)
3672 {
3673 	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
3674 	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
3675 	struct net *net = sock_net(skb->sk);
3676 	struct nexthop *nh;
3677 	int err;
3678 
3679 	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
3680 	if (err)
3681 		return err;
3682 
3683 	if (dd.filter.nh_id) {
3684 		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
3685 						  cb->extack);
3686 		if (IS_ERR(nh))
3687 			return PTR_ERR(nh);
3688 		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
3689 	} else {
3690 		struct rb_root *root = &net->nexthop.rb_root;
3691 
3692 		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
3693 					     &rtm_dump_nexthop_bucket_cb, &dd);
3694 	}
3695 
3696 	cb->seq = net->nexthop.seq;
3697 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3698 	return err;
3699 }
3700 
3701 static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
3702 					      u16 *bucket_index,
3703 					      struct netlink_ext_ack *extack)
3704 {
3705 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
3706 	int err;
3707 
3708 	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
3709 			       res, rtm_nh_res_bucket_policy_get, extack);
3710 	if (err < 0)
3711 		return err;
3712 
3713 	if (!tb[NHA_RES_BUCKET_INDEX]) {
3714 		NL_SET_ERR_MSG(extack, "Bucket index is missing");
3715 		return -EINVAL;
3716 	}
3717 
3718 	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
3719 	return 0;
3720 }
3721 
3722 static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
3723 				   u32 *id, u16 *bucket_index,
3724 				   struct netlink_ext_ack *extack)
3725 {
3726 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
3727 	int err;
3728 
3729 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3730 			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
3731 			  rtm_nh_policy_get_bucket, extack);
3732 	if (err < 0)
3733 		return err;
3734 
3735 	err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
3736 	if (err)
3737 		return err;
3738 
3739 	if (!tb[NHA_RES_BUCKET]) {
3740 		NL_SET_ERR_MSG(extack, "Bucket information is missing");
3741 		return -EINVAL;
3742 	}
3743 
3744 	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
3745 						 bucket_index, extack);
3746 	if (err)
3747 		return err;
3748 
3749 	return 0;
3750 }
3751 
3752 /* rtnl */
3753 static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3754 				  struct netlink_ext_ack *extack)
3755 {
3756 	struct net *net = sock_net(in_skb->sk);
3757 	struct nh_res_table *res_table;
3758 	struct sk_buff *skb = NULL;
3759 	struct nh_group *nhg;
3760 	struct nexthop *nh;
3761 	u16 bucket_index;
3762 	int err;
3763 	u32 id;
3764 
3765 	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
3766 	if (err)
3767 		return err;
3768 
3769 	nh = nexthop_find_group_resilient(net, id, extack);
3770 	if (IS_ERR(nh))
3771 		return PTR_ERR(nh);
3772 
3773 	nhg = rtnl_dereference(nh->nh_grp);
3774 	res_table = rtnl_dereference(nhg->res_table);
3775 	if (bucket_index >= res_table->num_nh_buckets) {
3776 		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
3777 		return -ENOENT;
3778 	}
3779 
3780 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3781 	if (!skb)
3782 		return -ENOBUFS;
3783 
3784 	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
3785 				 bucket_index, RTM_NEWNEXTHOPBUCKET,
3786 				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
3787 				 0, extack);
3788 	if (err < 0) {
3789 		WARN_ON(err == -EMSGSIZE);
3790 		goto errout_free;
3791 	}
3792 
3793 	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3794 
3795 errout_free:
3796 	kfree_skb(skb);
3797 	return err;
3798 }
3799 
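/* Propagate a device MTU change: walk the per-device hash and update the
 * cached route MTUs of every IPv4 nexthop bound to @dev via
 * fib_nhc_update_mtu().
 */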
3800 static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
3801 {
3802 	unsigned int hash = nh_dev_hashfn(dev->ifindex);
3803 	struct net *net = dev_net(dev);
3804 	struct hlist_head *head = &net->nexthop.devhash[hash];
3805 	struct hlist_node *n;
3806 	struct nh_info *nhi;
3807 
3808 	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
3809 		if (nhi->fib_nhc.nhc_dev == dev) {
3810 			if (nhi->family == AF_INET)
3811 				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
3812 						   orig_mtu);
3813 		}
3814 	}
3815 }
3816 
3817 /* rtnl */
3818 static int nh_netdev_event(struct notifier_block *this,
3819 			   unsigned long event, void *ptr)
3820 {
3821 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3822 	struct netdev_notifier_info_ext *info_ext;
3823 
3824 	switch (event) {
3825 	case NETDEV_DOWN:
3826 	case NETDEV_UNREGISTER:
3827 		nexthop_flush_dev(dev, event);
3828 		break;
3829 	case NETDEV_CHANGE:
3830 		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
3831 			nexthop_flush_dev(dev, event);
3832 		break;
3833 	case NETDEV_CHANGEMTU:
3834 		info_ext = ptr;
3835 		nexthop_sync_mtu(dev, info_ext->ext.mtu);
3836 		rt_cache_flush(dev_net(dev));
3837 		break;
3838 	}
3839 	return NOTIFY_DONE;
3840 }
3841 
3842 static struct notifier_block nh_netdev_notifier = {
3843 	.notifier_call = nh_netdev_event,
3844 };
3845 
3846 static int nexthops_dump(struct net *net, struct notifier_block *nb,
3847 			 enum nexthop_event_type event_type,
3848 			 struct netlink_ext_ack *extack)
3849 {
3850 	struct rb_root *root = &net->nexthop.rb_root;
3851 	struct rb_node *node;
3852 	int err = 0;
3853 
3854 	for (node = rb_first(root); node; node = rb_next(node)) {
3855 		struct nexthop *nh;
3856 
3857 		nh = rb_entry(node, struct nexthop, rb_node);
3858 		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
3859 		if (err)
3860 			break;
3861 	}
3862 
3863 	return err;
3864 }
3865 
3866 int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
3867 			      struct netlink_ext_ack *extack)
3868 {
3869 	int err;
3870 
3871 	rtnl_lock();
3872 	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
3873 	if (err)
3874 		goto unlock;
3875 	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
3876 					       nb);
3877 unlock:
3878 	rtnl_unlock();
3879 	return err;
3880 }
3881 EXPORT_SYMBOL(register_nexthop_notifier);
3882 
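/* Editorial note: variant for callers that already hold RTNL (e.g. a
 * pernet exit path).  On successful unregistration the nexthops are
 * replayed once more as NEXTHOP_EVENT_DEL so the listener can drop any
 * state it installed during the REPLACE replay.
 */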
3883 int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3884 {
3885 	int err;
3886 
3887 	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
3888 						 nb);
3889 	if (!err)
3890 		nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
3891 	return err;
3892 }
3893 EXPORT_SYMBOL(__unregister_nexthop_notifier);
3894 
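/* Editorial note: convenience wrapper that takes RTNL around
 * __unregister_nexthop_notifier() for callers not already holding it.
 */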
3895 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3896 {
3897 	int err;
3898 
3899 	rtnl_lock();
3900 	err = __unregister_nexthop_notifier(net, nb);
3901 	rtnl_unlock();
3902 	return err;
3903 }
3904 EXPORT_SYMBOL(unregister_nexthop_notifier);
3905 
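/* Editorial note: driver hook to mirror hardware state into the
 * RTNH_F_OFFLOAD and RTNH_F_TRAP flags of nexthop @id.  Both flags are
 * cleared first, so calling with offload == trap == false simply resets
 * them.  Only rcu_read_lock() is taken; an unknown @id is silently
 * ignored.
 */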
3906 void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
3907 {
3908 	struct nexthop *nexthop;
3909 
3910 	rcu_read_lock();
3911 
3912 	nexthop = nexthop_find_by_id(net, id);
3913 	if (!nexthop)
3914 		goto out;
3915 
3916 	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3917 	if (offload)
3918 		nexthop->nh_flags |= RTNH_F_OFFLOAD;
3919 	if (trap)
3920 		nexthop->nh_flags |= RTNH_F_TRAP;
3921 
3922 out:
3923 	rcu_read_unlock();
3924 }
3925 EXPORT_SYMBOL(nexthop_set_hw_flags);
3926 
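/* Editorial note: as nexthop_set_hw_flags(), but for one bucket of a
 * resilient group.  A no-op unless @id names a resilient nexthop group
 * and @bucket_index lies within its bucket table.
 */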
3927 void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
3928 				 bool offload, bool trap)
3929 {
3930 	struct nh_res_table *res_table;
3931 	struct nh_res_bucket *bucket;
3932 	struct nexthop *nexthop;
3933 	struct nh_group *nhg;
3934 
3935 	rcu_read_lock();
3936 
3937 	nexthop = nexthop_find_by_id(net, id);
3938 	if (!nexthop || !nexthop->is_group)
3939 		goto out;
3940 
3941 	nhg = rcu_dereference(nexthop->nh_grp);
3942 	if (!nhg->resilient)
3943 		goto out;
3944 
3945 	if (bucket_index >= nhg->res_table->num_nh_buckets)
3946 		goto out;
3947 
3948 	res_table = rcu_dereference(nhg->res_table);
3949 	bucket = &res_table->nh_buckets[bucket_index];
3950 	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
3951 	if (offload)
3952 		bucket->nh_flags |= RTNH_F_OFFLOAD;
3953 	if (trap)
3954 		bucket->nh_flags |= RTNH_F_TRAP;
3955 
3956 out:
3957 	rcu_read_unlock();
3958 }
3959 EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
3960 
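/* Editorial note: driver feedback for resilient-group aging.  @activity
 * carries one bit per bucket; every set bit marks that bucket as
 * recently busy, deferring its idle-timer based reassignment.  A bitmap
 * whose size does not match the group's bucket count is rejected
 * outright rather than applied partially.
 *
 * Illustrative sketch of a caller (hypothetical driver code, not part of
 * this file; hw_bucket_was_hit() is invented, error handling omitted):
 *
 *	unsigned long *activity = bitmap_zalloc(num_buckets, GFP_KERNEL);
 *	u16 i;
 *
 *	for (i = 0; i < num_buckets; i++)
 *		if (hw_bucket_was_hit(i))
 *			__set_bit(i, activity);
 *	nexthop_res_grp_activity_update(net, nh_id, num_buckets, activity);
 *	bitmap_free(activity);
 */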
3961 void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
3962 				     unsigned long *activity)
3963 {
3964 	struct nh_res_table *res_table;
3965 	struct nexthop *nexthop;
3966 	struct nh_group *nhg;
3967 	u16 i;
3968 
3969 	rcu_read_lock();
3970 
3971 	nexthop = nexthop_find_by_id(net, id);
3972 	if (!nexthop || !nexthop->is_group)
3973 		goto out;
3974 
3975 	nhg = rcu_dereference(nexthop->nh_grp);
3976 	if (!nhg->resilient)
3977 		goto out;
3978 
3979 	/* Instead of silently ignoring some buckets, demand that the sizes
3980 	 * be the same.
3981 	 */
3982 	res_table = rcu_dereference(nhg->res_table);
3983 	if (num_buckets != res_table->num_nh_buckets)
3984 		goto out;
3985 
3986 	for (i = 0; i < num_buckets; i++) {
3987 		if (test_bit(i, activity))
3988 			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
3989 	}
3990 
3991 out:
3992 	rcu_read_unlock();
3993 }
3994 EXPORT_SYMBOL(nexthop_res_grp_activity_update);
3995 
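/* Editorial note: batched pernet exit, entered with RTNL already held
 * (hence the ASSERT_RTNL()): flush every nexthop of each dying netns in
 * one pass instead of retaking the lock per namespace.
 */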
3996 static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
3997 						   struct list_head *dev_to_kill)
3998 {
3999 	struct net *net;
4000 
4001 	ASSERT_RTNL();
4002 	list_for_each_entry(net, net_list, exit_list)
4003 		flush_all_nexthops(net);
4004 }
4005 
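/* Editorial note: per-netns exit, freeing the device hash.  The nexthops
 * themselves were already flushed by nexthop_net_exit_batch_rtnl() above.
 */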
4006 static void __net_exit nexthop_net_exit(struct net *net)
4007 {
4008 	kfree(net->nexthop.devhash);
4009 	net->nexthop.devhash = NULL;
4010 }
4011 
4012 static int __net_init nexthop_net_init(struct net *net)
4013 {
4014 	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;
4015 
4016 	net->nexthop.rb_root = RB_ROOT;
4017 	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
4018 	if (!net->nexthop.devhash)
4019 		return -ENOMEM;
4020 	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
4021 
4022 	return 0;
4023 }
4024 
4025 static struct pernet_operations nexthop_net_ops = {
4026 	.init = nexthop_net_init,
4027 	.exit = nexthop_net_exit,
4028 	.exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
4029 };
4030 
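/* Editorial note: late init; set up pernet state, hook the netdevice
 * notifier, and wire the RTM_*NEXTHOP message handlers.  The new/dump
 * handlers are additionally registered for PF_INET and PF_INET6 so that
 * requests carrying either family reach the same generic implementation
 * as PF_UNSPEC.
 */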
4031 static int __init nexthop_init(void)
4032 {
4033 	register_pernet_subsys(&nexthop_net_ops);
4034 
4035 	register_netdevice_notifier(&nh_netdev_notifier);
4036 
4037 	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4038 	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
4039 	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
4040 		      rtm_dump_nexthop, 0);
4041 
4042 	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4043 	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
4044 
4045 	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
4046 	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
4047 
4048 	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
4049 		      rtm_dump_nexthop_bucket, 0);
4050 
4051 	return 0;
4052 }
4053 subsys_initcall(nexthop_init);
4054