xref: /linux/net/ipv4/nexthop.c (revision a7ddedc84c59a645ef970b992f7cda5bffc70cc0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Generic nexthop implementation
3  *
4  * Copyright (c) 2017-19 Cumulus Networks
5  * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
6  */
7 
8 #include <linux/nexthop.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
12 #include <net/arp.h>
13 #include <net/ipv6_stubs.h>
14 #include <net/lwtunnel.h>
15 #include <net/ndisc.h>
16 #include <net/nexthop.h>
17 #include <net/route.h>
18 #include <net/sock.h>
19 
20 #define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
21 #define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */
22 
23 static void remove_nexthop(struct net *net, struct nexthop *nh,
24 			   struct nl_info *nlinfo);
25 
26 #define NH_DEV_HASHBITS  8
27 #define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
28 
29 #define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS |		\
30 			       NHA_OP_FLAG_DUMP_HW_STATS)
31 
32 static const struct nla_policy rtm_nh_policy_new[] = {
33 	[NHA_ID]		= { .type = NLA_U32 },
34 	[NHA_GROUP]		= { .type = NLA_BINARY },
35 	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
36 	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
37 	[NHA_OIF]		= { .type = NLA_U32 },
38 	[NHA_GATEWAY]		= { .type = NLA_BINARY },
39 	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
40 	[NHA_ENCAP]		= { .type = NLA_NESTED },
41 	[NHA_FDB]		= { .type = NLA_FLAG },
42 	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
43 	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
44 };
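
/* Illustrative only (not part of this file): the attributes validated by
 * rtm_nh_policy_new correspond to iproute2 RTM_NEWNEXTHOP requests such as
 *
 *	ip nexthop add id 1 via 192.0.2.1 dev eth0   (NHA_ID, NHA_GATEWAY, NHA_OIF)
 *	ip nexthop add id 2 blackhole                (NHA_BLACKHOLE)
 *	ip nexthop add id 100 group 1/2              (NHA_GROUP)
 *
 * Exact syntax depends on the installed iproute2 version.
 */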
45 
46 static const struct nla_policy rtm_nh_policy_get[] = {
47 	[NHA_ID]		= { .type = NLA_U32 },
48 	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
49 						  NHA_OP_FLAGS_DUMP_ALL),
50 };
51 
52 static const struct nla_policy rtm_nh_policy_del[] = {
53 	[NHA_ID]		= { .type = NLA_U32 },
54 };
55 
56 static const struct nla_policy rtm_nh_policy_dump[] = {
57 	[NHA_OIF]		= { .type = NLA_U32 },
58 	[NHA_GROUPS]		= { .type = NLA_FLAG },
59 	[NHA_MASTER]		= { .type = NLA_U32 },
60 	[NHA_FDB]		= { .type = NLA_FLAG },
61 	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
62 						  NHA_OP_FLAGS_DUMP_ALL),
63 };
64 
65 static const struct nla_policy rtm_nh_res_policy_new[] = {
66 	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
67 	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
68 	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
69 };
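
/* Illustrative only: a resilient group request carrying the attributes
 * above, as iproute2 would express it (syntax may vary by version):
 *
 *	ip nexthop add id 10 group 1/2 type resilient \
 *		buckets 32 idle_timer 10 unbalanced_timer 30
 *
 * "buckets" maps to NHA_RES_GROUP_BUCKETS, "idle_timer" to
 * NHA_RES_GROUP_IDLE_TIMER and "unbalanced_timer" to
 * NHA_RES_GROUP_UNBALANCED_TIMER.
 */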
70 
71 static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
72 	[NHA_ID]		= { .type = NLA_U32 },
73 	[NHA_OIF]		= { .type = NLA_U32 },
74 	[NHA_MASTER]		= { .type = NLA_U32 },
75 	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
76 };
77 
78 static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
79 	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
80 };
81 
82 static const struct nla_policy rtm_nh_policy_get_bucket[] = {
83 	[NHA_ID]		= { .type = NLA_U32 },
84 	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
85 };
86 
87 static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
88 	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
89 };
90 
91 static bool nexthop_notifiers_is_empty(struct net *net)
92 {
93 	return !net->nexthop.notifier_chain.head;
94 }
95 
96 static void
97 __nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
98 			       const struct nh_info *nhi)
99 {
100 	nh_info->dev = nhi->fib_nhc.nhc_dev;
101 	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
102 	if (nh_info->gw_family == AF_INET)
103 		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
104 	else if (nh_info->gw_family == AF_INET6)
105 		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;
106 
107 	nh_info->id = nhi->nh_parent->id;
108 	nh_info->is_reject = nhi->reject_nh;
109 	nh_info->is_fdb = nhi->fdb_nh;
110 	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
111 }
112 
113 static int nh_notifier_single_info_init(struct nh_notifier_info *info,
114 					const struct nexthop *nh)
115 {
116 	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
117 
118 	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
119 	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
120 	if (!info->nh)
121 		return -ENOMEM;
122 
123 	__nh_notifier_single_info_init(info->nh, nhi);
124 
125 	return 0;
126 }
127 
128 static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
129 {
130 	kfree(info->nh);
131 }
132 
133 static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
134 				       struct nh_group *nhg)
135 {
136 	u16 num_nh = nhg->num_nh;
137 	int i;
138 
139 	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
140 	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
141 			       GFP_KERNEL);
142 	if (!info->nh_grp)
143 		return -ENOMEM;
144 
145 	info->nh_grp->num_nh = num_nh;
146 	info->nh_grp->is_fdb = nhg->fdb_nh;
147 	info->nh_grp->hw_stats = nhg->hw_stats;
148 
149 	for (i = 0; i < num_nh; i++) {
150 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
151 		struct nh_info *nhi;
152 
153 		nhi = rtnl_dereference(nhge->nh->nh_info);
154 		info->nh_grp->nh_entries[i].weight = nhge->weight;
155 		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
156 					       nhi);
157 	}
158 
159 	return 0;
160 }
161 
162 static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
163 					   struct nh_group *nhg)
164 {
165 	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
166 	u16 num_nh_buckets = res_table->num_nh_buckets;
167 	unsigned long size;
168 	u16 i;
169 
170 	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
171 	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
172 	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
173 				       __GFP_NOWARN);
174 	if (!info->nh_res_table)
175 		return -ENOMEM;
176 
177 	info->nh_res_table->num_nh_buckets = num_nh_buckets;
178 	info->nh_res_table->hw_stats = nhg->hw_stats;
179 
180 	for (i = 0; i < num_nh_buckets; i++) {
181 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
182 		struct nh_grp_entry *nhge;
183 		struct nh_info *nhi;
184 
185 		nhge = rtnl_dereference(bucket->nh_entry);
186 		nhi = rtnl_dereference(nhge->nh->nh_info);
187 		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
188 					       nhi);
189 	}
190 
191 	return 0;
192 }
193 
194 static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
195 				     const struct nexthop *nh)
196 {
197 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
198 
199 	if (nhg->hash_threshold)
200 		return nh_notifier_mpath_info_init(info, nhg);
201 	else if (nhg->resilient)
202 		return nh_notifier_res_table_info_init(info, nhg);
203 	return -EINVAL;
204 }
205 
206 static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
207 				      const struct nexthop *nh)
208 {
209 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
210 
211 	if (nhg->hash_threshold)
212 		kfree(info->nh_grp);
213 	else if (nhg->resilient)
214 		vfree(info->nh_res_table);
215 }
216 
217 static int nh_notifier_info_init(struct nh_notifier_info *info,
218 				 const struct nexthop *nh)
219 {
220 	info->id = nh->id;
221 
222 	if (nh->is_group)
223 		return nh_notifier_grp_info_init(info, nh);
224 	else
225 		return nh_notifier_single_info_init(info, nh);
226 }
227 
228 static void nh_notifier_info_fini(struct nh_notifier_info *info,
229 				  const struct nexthop *nh)
230 {
231 	if (nh->is_group)
232 		nh_notifier_grp_info_fini(info, nh);
233 	else
234 		nh_notifier_single_info_fini(info);
235 }
236 
237 static int call_nexthop_notifiers(struct net *net,
238 				  enum nexthop_event_type event_type,
239 				  struct nexthop *nh,
240 				  struct netlink_ext_ack *extack)
241 {
242 	struct nh_notifier_info info = {
243 		.net = net,
244 		.extack = extack,
245 	};
246 	int err;
247 
248 	ASSERT_RTNL();
249 
250 	if (nexthop_notifiers_is_empty(net))
251 		return 0;
252 
253 	err = nh_notifier_info_init(&info, nh);
254 	if (err) {
255 		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
256 		return err;
257 	}
258 
259 	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
260 					   event_type, &info);
261 	nh_notifier_info_fini(&info, nh);
262 
263 	return notifier_to_errno(err);
264 }
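
/* Minimal sketch of a listener on this chain; assumed driver-side code
 * (mlxsw and netdevsim are real in-tree listeners), not part of this file.
 * Registration is done with register_nexthop_notifier():
 *
 *	static int my_nh_event(struct notifier_block *nb, unsigned long event,
 *			       void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		switch (event) {
 *		case NEXTHOP_EVENT_REPLACE:
 *			return my_hw_program(info) ? NOTIFY_BAD : NOTIFY_DONE;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 * my_nh_event() and my_hw_program() are hypothetical names; NEXTHOP_EVENT_*
 * and register_nexthop_notifier() are the real kernel API.
 */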
265 
266 static int
267 nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
268 				      bool force, unsigned int *p_idle_timer_ms)
269 {
270 	struct nh_res_table *res_table;
271 	struct nh_group *nhg;
272 	struct nexthop *nh;
273 	int err = 0;
274 
275 	/* When 'force' is false, nexthop bucket replacement is performed
276 	 * because the bucket was deemed to be idle. In this case, capable
277 	 * listeners can choose to perform an atomic replacement: The bucket is
278 	 * only replaced if it is inactive. However, if the idle timer interval
279 	 * is smaller than the interval in which a listener is querying
280 	 * buckets' activity from the device, then atomic replacement should
281 	 * not be tried. Pass the idle timer value to listeners, so that they
282 	 * can determine which type of replacement to perform.
283 	 */
284 	if (force) {
285 		*p_idle_timer_ms = 0;
286 		return 0;
287 	}
288 
289 	rcu_read_lock();
290 
291 	nh = nexthop_find_by_id(info->net, info->id);
292 	if (!nh) {
293 		err = -EINVAL;
294 		goto out;
295 	}
296 
297 	nhg = rcu_dereference(nh->nh_grp);
298 	res_table = rcu_dereference(nhg->res_table);
299 	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);
300 
301 out:
302 	rcu_read_unlock();
303 
304 	return err;
305 }
306 
307 static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
308 					    u16 bucket_index, bool force,
309 					    struct nh_info *oldi,
310 					    struct nh_info *newi)
311 {
312 	unsigned int idle_timer_ms;
313 	int err;
314 
315 	err = nh_notifier_res_bucket_idle_timer_get(info, force,
316 						    &idle_timer_ms);
317 	if (err)
318 		return err;
319 
320 	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
321 	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
322 				      GFP_KERNEL);
323 	if (!info->nh_res_bucket)
324 		return -ENOMEM;
325 
326 	info->nh_res_bucket->bucket_index = bucket_index;
327 	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
328 	info->nh_res_bucket->force = force;
329 	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
330 	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
331 	return 0;
332 }
333 
334 static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
335 {
336 	kfree(info->nh_res_bucket);
337 }
338 
339 static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
340 					       u16 bucket_index, bool force,
341 					       struct nh_info *oldi,
342 					       struct nh_info *newi,
343 					       struct netlink_ext_ack *extack)
344 {
345 	struct nh_notifier_info info = {
346 		.net = net,
347 		.extack = extack,
348 		.id = nhg_id,
349 	};
350 	int err;
351 
352 	if (nexthop_notifiers_is_empty(net))
353 		return 0;
354 
355 	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
356 					       oldi, newi);
357 	if (err)
358 		return err;
359 
360 	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
361 					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
362 	nh_notifier_res_bucket_info_fini(&info);
363 
364 	return notifier_to_errno(err);
365 }
366 
367 /* There are three users of RES_TABLE, and of the NHs etc. it references:
368  *
369  * 1) a collection of callbacks for NH maintenance. This operates under
370  *    RTNL,
371  * 2) the delayed work that gradually balances the resilient table,
372  * 3) and nexthop_select_path(), operating under RCU.
373  *
374  * Both the delayed work and the code running under RTNL are writers,
375  * and they need to maintain mutual exclusion. Since there are only two
376  * well-known writers for each table, the RTNL code can make sure it has
377  * exclusive access as follows:
378  *
379  * - Have the DW operate without locking;
380  * - synchronously cancel the DW;
381  * - do the writing;
382  * - if the write was not actually a delete, call upkeep, which schedules
383  *   DW again if necessary.
384  *
385  * The functions that are always called from the RTNL context use
386  * rtnl_dereference(). The functions that can also be called from the DW do
387  * a raw dereference and rely on the above mutual exclusion scheme.
388  */
389 #define nh_res_dereference(p) (rcu_dereference_raw(p))
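
/* Concretely, the RTNL-side write pattern described above is (see
 * replace_nexthop_grp_res() later in this file):
 *
 *	nh_res_table_cancel_upkeep(res_table);         synchronously cancel DW
 *	...migrate buckets, rebalance...               now the only writer
 *	nh_res_table_upkeep(res_table, true, false);   reschedules DW if needed
 */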
390 
391 static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
392 					     u16 bucket_index, bool force,
393 					     struct nexthop *old_nh,
394 					     struct nexthop *new_nh,
395 					     struct netlink_ext_ack *extack)
396 {
397 	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
398 	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);
399 
400 	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
401 						   force, oldi, newi, extack);
402 }
403 
404 static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
405 					    struct netlink_ext_ack *extack)
406 {
407 	struct nh_notifier_info info = {
408 		.net = net,
409 		.extack = extack,
410 		.id = nh->id,
411 	};
412 	struct nh_group *nhg;
413 	int err;
414 
415 	ASSERT_RTNL();
416 
417 	if (nexthop_notifiers_is_empty(net))
418 		return 0;
419 
420 	/* At this point, the nexthop buckets are still not populated. Only
421 	 * emit a notification with the logical nexthops, so that a listener
422 	 * can veto it in case of an unsupported configuration.
423 	 */
424 	nhg = rtnl_dereference(nh->nh_grp);
425 	err = nh_notifier_mpath_info_init(&info, nhg);
426 	if (err) {
427 		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
428 		return err;
429 	}
430 
431 	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
432 					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
433 					   &info);
434 	kfree(info.nh_grp);
435 
436 	return notifier_to_errno(err);
437 }
438 
439 static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
440 				 enum nexthop_event_type event_type,
441 				 struct nexthop *nh,
442 				 struct netlink_ext_ack *extack)
443 {
444 	struct nh_notifier_info info = {
445 		.net = net,
446 		.extack = extack,
447 	};
448 	int err;
449 
450 	err = nh_notifier_info_init(&info, nh);
451 	if (err)
452 		return err;
453 
454 	err = nb->notifier_call(nb, event_type, &info);
455 	nh_notifier_info_fini(&info, nh);
456 
457 	return notifier_to_errno(err);
458 }
459 
460 static unsigned int nh_dev_hashfn(unsigned int val)
461 {
462 	unsigned int mask = NH_DEV_HASHSIZE - 1;
463 
464 	return (val ^
465 		(val >> NH_DEV_HASHBITS) ^
466 		(val >> (NH_DEV_HASHBITS * 2))) & mask;
467 }
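
/* Worked example (illustrative): with NH_DEV_HASHBITS == 8, ifindex
 * 0x12345 folds as
 *
 *	0x12345 ^ (0x12345 >> 8) ^ (0x12345 >> 16)
 *	  = 0x12345 ^ 0x123 ^ 0x1 = 0x12267
 *
 * which masked with 0xff yields bucket 0x67, so all bits of the ifindex
 * contribute to the 8-bit bucket index.
 */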
468 
469 static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
470 {
471 	struct net_device *dev = nhi->fib_nhc.nhc_dev;
472 	struct hlist_head *head;
473 	unsigned int hash;
474 
475 	WARN_ON(!dev);
476 
477 	hash = nh_dev_hashfn(dev->ifindex);
478 	head = &net->nexthop.devhash[hash];
479 	hlist_add_head(&nhi->dev_hash, head);
480 }
481 
482 static void nexthop_free_group(struct nexthop *nh)
483 {
484 	struct nh_group *nhg;
485 	int i;
486 
487 	nhg = rcu_dereference_raw(nh->nh_grp);
488 	for (i = 0; i < nhg->num_nh; ++i) {
489 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
490 
491 		WARN_ON(!list_empty(&nhge->nh_list));
492 		free_percpu(nhge->stats);
493 		nexthop_put(nhge->nh);
494 	}
495 
496 	WARN_ON(nhg->spare == nhg);
497 
498 	if (nhg->resilient)
499 		vfree(rcu_dereference_raw(nhg->res_table));
500 
501 	kfree(nhg->spare);
502 	kfree(nhg);
503 }
504 
505 static void nexthop_free_single(struct nexthop *nh)
506 {
507 	struct nh_info *nhi;
508 
509 	nhi = rcu_dereference_raw(nh->nh_info);
510 	switch (nhi->family) {
511 	case AF_INET:
512 		fib_nh_release(nh->net, &nhi->fib_nh);
513 		break;
514 	case AF_INET6:
515 		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
516 		break;
517 	}
518 	kfree(nhi);
519 }
520 
521 void nexthop_free_rcu(struct rcu_head *head)
522 {
523 	struct nexthop *nh = container_of(head, struct nexthop, rcu);
524 
525 	if (nh->is_group)
526 		nexthop_free_group(nh);
527 	else
528 		nexthop_free_single(nh);
529 
530 	kfree(nh);
531 }
532 EXPORT_SYMBOL_GPL(nexthop_free_rcu);
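
/* The matching release path lives in include/net/nexthop.h; the last
 * reference drop is essentially:
 *
 *	if (refcount_dec_and_test(&nh->refcnt))
 *		call_rcu(&nh->rcu, nexthop_free_rcu);
 *
 * so readers traversing a nexthop under rcu_read_lock() never observe
 * freed memory.
 */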
533 
534 static struct nexthop *nexthop_alloc(void)
535 {
536 	struct nexthop *nh;
537 
538 	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
539 	if (nh) {
540 		INIT_LIST_HEAD(&nh->fi_list);
541 		INIT_LIST_HEAD(&nh->f6i_list);
542 		INIT_LIST_HEAD(&nh->grp_list);
543 		INIT_LIST_HEAD(&nh->fdb_list);
544 		spin_lock_init(&nh->lock);
545 	}
546 	return nh;
547 }
548 
549 static struct nh_group *nexthop_grp_alloc(u16 num_nh)
550 {
551 	struct nh_group *nhg;
552 
553 	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
554 	if (nhg)
555 		nhg->num_nh = num_nh;
556 
557 	return nhg;
558 }
559 
560 static void nh_res_table_upkeep_dw(struct work_struct *work);
561 
562 static struct nh_res_table *
563 nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
564 {
565 	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
566 	struct nh_res_table *res_table;
567 	unsigned long size;
568 
569 	size = struct_size(res_table, nh_buckets, num_nh_buckets);
570 	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
571 	if (!res_table)
572 		return NULL;
573 
574 	res_table->net = net;
575 	res_table->nhg_id = nhg_id;
576 	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
577 	INIT_LIST_HEAD(&res_table->uw_nh_entries);
578 	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
579 	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
580 	res_table->num_nh_buckets = num_nh_buckets;
581 	return res_table;
582 }
583 
584 static void nh_base_seq_inc(struct net *net)
585 {
586 	while (++net->nexthop.seq == 0)
587 		;
588 }
589 
590 /* no reference taken; rcu lock or rtnl must be held */
591 struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
592 {
593 	struct rb_node **pp, *parent = NULL, *next;
594 
595 	pp = &net->nexthop.rb_root.rb_node;
596 	while (1) {
597 		struct nexthop *nh;
598 
599 		next = rcu_dereference_raw(*pp);
600 		if (!next)
601 			break;
602 		parent = next;
603 
604 		nh = rb_entry(parent, struct nexthop, rb_node);
605 		if (id < nh->id)
606 			pp = &next->rb_left;
607 		else if (id > nh->id)
608 			pp = &next->rb_right;
609 		else
610 			return nh;
611 	}
612 	return NULL;
613 }
614 EXPORT_SYMBOL_GPL(nexthop_find_by_id);
615 
616 /* used for auto id allocation; called with rtnl held */
617 static u32 nh_find_unused_id(struct net *net)
618 {
619 	u32 id_start = net->nexthop.last_id_allocated;
620 
621 	while (1) {
622 		net->nexthop.last_id_allocated++;
623 		if (net->nexthop.last_id_allocated == id_start)
624 			break;
625 
626 		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
627 			return net->nexthop.last_id_allocated;
628 	}
629 	return 0;
630 }
631 
632 static void nh_res_time_set_deadline(unsigned long next_time,
633 				     unsigned long *deadline)
634 {
635 	if (time_before(next_time, *deadline))
636 		*deadline = next_time;
637 }
638 
639 static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
640 {
641 	if (list_empty(&res_table->uw_nh_entries))
642 		return 0;
643 	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
644 }
645 
646 static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
647 {
648 	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
649 	struct nlattr *nest;
650 
651 	nest = nla_nest_start(skb, NHA_RES_GROUP);
652 	if (!nest)
653 		return -EMSGSIZE;
654 
655 	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
656 			res_table->num_nh_buckets) ||
657 	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
658 			jiffies_to_clock_t(res_table->idle_timer)) ||
659 	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
660 			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
661 	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
662 			      nh_res_table_unbalanced_time(res_table),
663 			      NHA_RES_GROUP_PAD))
664 		goto nla_put_failure;
665 
666 	nla_nest_end(skb, nest);
667 	return 0;
668 
669 nla_put_failure:
670 	nla_nest_cancel(skb, nest);
671 	return -EMSGSIZE;
672 }
673 
674 static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
675 {
676 	struct nh_grp_entry_stats *cpu_stats;
677 
678 	cpu_stats = get_cpu_ptr(nhge->stats);
679 	u64_stats_update_begin(&cpu_stats->syncp);
680 	u64_stats_inc(&cpu_stats->packets);
681 	u64_stats_update_end(&cpu_stats->syncp);
682 	put_cpu_ptr(cpu_stats);
683 }
684 
685 static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
686 				    u64 *ret_packets)
687 {
688 	int i;
689 
690 	*ret_packets = 0;
691 
692 	for_each_possible_cpu(i) {
693 		struct nh_grp_entry_stats *cpu_stats;
694 		unsigned int start;
695 		u64 packets;
696 
697 		cpu_stats = per_cpu_ptr(nhge->stats, i);
698 		do {
699 			start = u64_stats_fetch_begin(&cpu_stats->syncp);
700 			packets = u64_stats_read(&cpu_stats->packets);
701 		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
702 
703 		*ret_packets += packets;
704 	}
705 }
706 
707 static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
708 					 const struct nexthop *nh)
709 {
710 	struct nh_group *nhg;
711 	int i;
712 
713 	ASSERT_RTNL();
714 	nhg = rtnl_dereference(nh->nh_grp);
715 
716 	info->id = nh->id;
717 	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
718 	info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
719 						    stats, nhg->num_nh),
720 					GFP_KERNEL);
721 	if (!info->nh_grp_hw_stats)
722 		return -ENOMEM;
723 
724 	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
725 	for (i = 0; i < nhg->num_nh; i++) {
726 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
727 
728 		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
729 	}
730 
731 	return 0;
732 }
733 
734 static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
735 {
736 	kfree(info->nh_grp_hw_stats);
737 }
738 
739 void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
740 				  unsigned int nh_idx,
741 				  u64 delta_packets)
742 {
743 	info->hw_stats_used = true;
744 	info->stats[nh_idx].packets += delta_packets;
745 }
746 EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);
747 
748 static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
749 					 struct nh_notifier_info *info)
750 {
751 	struct nh_group *nhg;
752 	int i;
753 
754 	ASSERT_RTNL();
755 	nhg = rtnl_dereference(nh->nh_grp);
756 
757 	for (i = 0; i < nhg->num_nh; i++) {
758 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
759 
760 		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
761 	}
762 }
763 
764 static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
765 {
766 	struct nh_notifier_info info = {
767 		.net = nh->net,
768 	};
769 	struct net *net = nh->net;
770 	int err;
771 
772 	if (nexthop_notifiers_is_empty(net)) {
773 		*hw_stats_used = false;
774 		return 0;
775 	}
776 
777 	err = nh_notifier_grp_hw_stats_init(&info, nh);
778 	if (err)
779 		return err;
780 
781 	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
782 					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
783 					   &info);
784 
785 	/* Cache whatever we got, even if there was an error, otherwise the
786 	 * successful stats retrievals would get lost.
787 	 */
788 	nh_grp_hw_stats_apply_update(nh, &info);
789 	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;
790 
791 	nh_notifier_grp_hw_stats_fini(&info);
792 	return notifier_to_errno(err);
793 }
794 
795 static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
796 					struct nh_grp_entry *nhge,
797 					u32 op_flags)
798 {
799 	struct nlattr *nest;
800 	u64 packets;
801 
802 	nh_grp_entry_stats_read(nhge, &packets);
803 
804 	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
805 	if (!nest)
806 		return -EMSGSIZE;
807 
808 	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
809 	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
810 			 packets + nhge->packets_hw))
811 		goto nla_put_failure;
812 
813 	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
814 	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
815 			 nhge->packets_hw))
816 		goto nla_put_failure;
817 
818 	nla_nest_end(skb, nest);
819 	return 0;
820 
821 nla_put_failure:
822 	nla_nest_cancel(skb, nest);
823 	return -EMSGSIZE;
824 }
825 
826 static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
827 				  u32 op_flags)
828 {
829 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
830 	struct nlattr *nest;
831 	bool hw_stats_used;
832 	int err;
833 	int i;
834 
835 	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
836 		goto err_out;
837 
838 	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
839 	    nhg->hw_stats) {
840 		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
841 		if (err)
842 			goto out;
843 
844 		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
845 			goto err_out;
846 	}
847 
848 	nest = nla_nest_start(skb, NHA_GROUP_STATS);
849 	if (!nest)
850 		goto err_out;
851 
852 	for (i = 0; i < nhg->num_nh; i++)
853 		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
854 						 op_flags))
855 			goto cancel_out;
856 
857 	nla_nest_end(skb, nest);
858 	return 0;
859 
860 cancel_out:
861 	nla_nest_cancel(skb, nest);
862 err_out:
863 	err = -EMSGSIZE;
864 out:
865 	return err;
866 }
867 
868 static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
869 			    u32 op_flags, u32 *resp_op_flags)
870 {
871 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
872 	struct nexthop_grp *p;
873 	size_t len = nhg->num_nh * sizeof(*p);
874 	struct nlattr *nla;
875 	u16 group_type = 0;
876 	u16 weight;
877 	int i;
878 
879 	*resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;
880 
881 	if (nhg->hash_threshold)
882 		group_type = NEXTHOP_GRP_TYPE_MPATH;
883 	else if (nhg->resilient)
884 		group_type = NEXTHOP_GRP_TYPE_RES;
885 
886 	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
887 		goto nla_put_failure;
888 
889 	nla = nla_reserve(skb, NHA_GROUP, len);
890 	if (!nla)
891 		goto nla_put_failure;
892 
893 	p = nla_data(nla);
894 	for (i = 0; i < nhg->num_nh; ++i) {
895 		weight = nhg->nh_entries[i].weight - 1;
896 
897 		*p++ = (struct nexthop_grp) {
898 			.id = nhg->nh_entries[i].nh->id,
899 			.weight = weight,
900 			.weight_high = weight >> 8,
901 		};
902 	}
903 
904 	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
905 		goto nla_put_failure;
906 
907 	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
908 	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
909 	     nla_put_nh_group_stats(skb, nh, op_flags)))
910 		goto nla_put_failure;
911 
912 	return 0;
913 
914 nla_put_failure:
915 	return -EMSGSIZE;
916 }
917 
918 static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
919 			int event, u32 portid, u32 seq, unsigned int nlflags,
920 			u32 op_flags)
921 {
922 	struct fib6_nh *fib6_nh;
923 	struct fib_nh *fib_nh;
924 	struct nlmsghdr *nlh;
925 	struct nh_info *nhi;
926 	struct nhmsg *nhm;
927 
928 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
929 	if (!nlh)
930 		return -EMSGSIZE;
931 
932 	nhm = nlmsg_data(nlh);
933 	nhm->nh_family = AF_UNSPEC;
934 	nhm->nh_flags = nh->nh_flags;
935 	nhm->nh_protocol = nh->protocol;
936 	nhm->nh_scope = 0;
937 	nhm->resvd = 0;
938 
939 	if (nla_put_u32(skb, NHA_ID, nh->id))
940 		goto nla_put_failure;
941 
942 	if (nh->is_group) {
943 		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
944 		u32 resp_op_flags = 0;
945 
946 		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
947 			goto nla_put_failure;
948 		if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
949 		    nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
950 			goto nla_put_failure;
951 		goto out;
952 	}
953 
954 	nhi = rtnl_dereference(nh->nh_info);
955 	nhm->nh_family = nhi->family;
956 	if (nhi->reject_nh) {
957 		if (nla_put_flag(skb, NHA_BLACKHOLE))
958 			goto nla_put_failure;
959 		goto out;
960 	} else if (nhi->fdb_nh) {
961 		if (nla_put_flag(skb, NHA_FDB))
962 			goto nla_put_failure;
963 	} else {
964 		const struct net_device *dev;
965 
966 		dev = nhi->fib_nhc.nhc_dev;
967 		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
968 			goto nla_put_failure;
969 	}
970 
971 	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
972 	switch (nhi->family) {
973 	case AF_INET:
974 		fib_nh = &nhi->fib_nh;
975 		if (fib_nh->fib_nh_gw_family &&
976 		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
977 			goto nla_put_failure;
978 		break;
979 
980 	case AF_INET6:
981 		fib6_nh = &nhi->fib6_nh;
982 		if (fib6_nh->fib_nh_gw_family &&
983 		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
984 			goto nla_put_failure;
985 		break;
986 	}
987 
988 	if (lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
989 				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
990 		goto nla_put_failure;
991 
992 out:
993 	nlmsg_end(skb, nlh);
994 	return 0;
995 
996 nla_put_failure:
997 	nlmsg_cancel(skb, nlh);
998 	return -EMSGSIZE;
999 }
1000 
1001 static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
1002 {
1003 	return nla_total_size(0) +	/* NHA_RES_GROUP */
1004 		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
1005 		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
1006 		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
1007 		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
1008 }
1009 
1010 static size_t nh_nlmsg_size_grp(struct nexthop *nh)
1011 {
1012 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
1013 	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
1014 	size_t tot = nla_total_size(sz) +
1015 		nla_total_size(2); /* NHA_GROUP_TYPE */
1016 
1017 	if (nhg->resilient)
1018 		tot += nh_nlmsg_size_grp_res(nhg);
1019 
1020 	return tot;
1021 }
1022 
1023 static size_t nh_nlmsg_size_single(struct nexthop *nh)
1024 {
1025 	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
1026 	size_t sz;
1027 
1028 	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
1029 	 * are mutually exclusive
1030 	 */
1031 	sz = nla_total_size(4);  /* NHA_OIF */
1032 
1033 	switch (nhi->family) {
1034 	case AF_INET:
1035 		if (nhi->fib_nh.fib_nh_gw_family)
1036 			sz += nla_total_size(4);  /* NHA_GATEWAY */
1037 		break;
1038 
1039 	case AF_INET6:
1040 		/* NHA_GATEWAY */
1041 		if (nhi->fib6_nh.fib_nh_gw_family)
1042 			sz += nla_total_size(sizeof(const struct in6_addr));
1043 		break;
1044 	}
1045 
1046 	if (nhi->fib_nhc.nhc_lwtstate) {
1047 		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
1048 		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
1049 	}
1050 
1051 	return sz;
1052 }
1053 
1054 static size_t nh_nlmsg_size(struct nexthop *nh)
1055 {
1056 	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));
1057 
1058 	sz += nla_total_size(4); /* NHA_ID */
1059 
1060 	if (nh->is_group)
1061 		sz += nh_nlmsg_size_grp(nh) +
1062 		      nla_total_size(4) +	/* NHA_OP_FLAGS */
1063 		      0;
1064 	else
1065 		sz += nh_nlmsg_size_single(nh);
1066 
1067 	return sz;
1068 }
1069 
1070 static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
1071 {
1072 	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
1073 	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
1074 	struct sk_buff *skb;
1075 	int err = -ENOBUFS;
1076 
1077 	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
1078 	if (!skb)
1079 		goto errout;
1080 
1081 	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
1082 	if (err < 0) {
1083 		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
1084 		WARN_ON(err == -EMSGSIZE);
1085 		kfree_skb(skb);
1086 		goto errout;
1087 	}
1088 
1089 	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
1090 		    info->nlh, gfp_any());
1091 	return;
1092 errout:
1093 	rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
1094 }
1095 
1096 static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
1097 {
1098 	return (unsigned long)atomic_long_read(&bucket->used_time);
1099 }
1100 
1101 static unsigned long
1102 nh_res_bucket_idle_point(const struct nh_res_table *res_table,
1103 			 const struct nh_res_bucket *bucket,
1104 			 unsigned long now)
1105 {
1106 	unsigned long time = nh_res_bucket_used_time(bucket);
1107 
1108 	/* Bucket has not been used since it was migrated; its idle point is now. */
1109 	if (time == bucket->migrated_time)
1110 		return now;
1111 
1112 	return time + res_table->idle_timer;
1113 }
1114 
1115 static unsigned long
1116 nh_res_table_unb_point(const struct nh_res_table *res_table)
1117 {
1118 	return res_table->unbalanced_since + res_table->unbalanced_timer;
1119 }
1120 
1121 static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
1122 				   struct nh_res_bucket *bucket)
1123 {
1124 	unsigned long now = jiffies;
1125 
1126 	atomic_long_set(&bucket->used_time, (long)now);
1127 	bucket->migrated_time = now;
1128 }
1129 
1130 static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
1131 {
1132 	atomic_long_set(&bucket->used_time, (long)jiffies);
1133 }
1134 
1135 static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
1136 {
1137 	unsigned long used_time = nh_res_bucket_used_time(bucket);
1138 
1139 	return jiffies_delta_to_clock_t(jiffies - used_time);
1140 }
1141 
1142 static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
1143 			      struct nh_res_bucket *bucket, u16 bucket_index,
1144 			      int event, u32 portid, u32 seq,
1145 			      unsigned int nlflags,
1146 			      struct netlink_ext_ack *extack)
1147 {
1148 	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
1149 	struct nlmsghdr *nlh;
1150 	struct nlattr *nest;
1151 	struct nhmsg *nhm;
1152 
1153 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
1154 	if (!nlh)
1155 		return -EMSGSIZE;
1156 
1157 	nhm = nlmsg_data(nlh);
1158 	nhm->nh_family = AF_UNSPEC;
1159 	nhm->nh_flags = bucket->nh_flags;
1160 	nhm->nh_protocol = nh->protocol;
1161 	nhm->nh_scope = 0;
1162 	nhm->resvd = 0;
1163 
1164 	if (nla_put_u32(skb, NHA_ID, nh->id))
1165 		goto nla_put_failure;
1166 
1167 	nest = nla_nest_start(skb, NHA_RES_BUCKET);
1168 	if (!nest)
1169 		goto nla_put_failure;
1170 
1171 	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
1172 	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
1173 	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
1174 			      nh_res_bucket_idle_time(bucket),
1175 			      NHA_RES_BUCKET_PAD))
1176 		goto nla_put_failure_nest;
1177 
1178 	nla_nest_end(skb, nest);
1179 	nlmsg_end(skb, nlh);
1180 	return 0;
1181 
1182 nla_put_failure_nest:
1183 	nla_nest_cancel(skb, nest);
1184 nla_put_failure:
1185 	nlmsg_cancel(skb, nlh);
1186 	return -EMSGSIZE;
1187 }
1188 
1189 static void nexthop_bucket_notify(struct nh_res_table *res_table,
1190 				  u16 bucket_index)
1191 {
1192 	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
1193 	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
1194 	struct nexthop *nh = nhge->nh_parent;
1195 	struct sk_buff *skb;
1196 	int err = -ENOBUFS;
1197 
1198 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1199 	if (!skb)
1200 		goto errout;
1201 
1202 	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
1203 				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
1204 				 NULL);
1205 	if (err < 0) {
1206 		kfree_skb(skb);
1207 		goto errout;
1208 	}
1209 
1210 	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
1211 	return;
1212 errout:
1213 	rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
1214 }
1215 
1216 static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
1217 			   bool *is_fdb, struct netlink_ext_ack *extack)
1218 {
1219 	if (nh->is_group) {
1220 		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
1221 
1222 		/* Nesting groups within groups is not supported. */
1223 		if (nhg->hash_threshold) {
1224 			NL_SET_ERR_MSG(extack,
1225 				       "Hash-threshold group can not be a nexthop within a group");
1226 			return false;
1227 		}
1228 		if (nhg->resilient) {
1229 			NL_SET_ERR_MSG(extack,
1230 				       "Resilient group can not be a nexthop within a group");
1231 			return false;
1232 		}
1233 		*is_fdb = nhg->fdb_nh;
1234 	} else {
1235 		struct nh_info *nhi = rtnl_dereference(nh->nh_info);
1236 
1237 		if (nhi->reject_nh && npaths > 1) {
1238 			NL_SET_ERR_MSG(extack,
1239 				       "Blackhole nexthop can not be used in a group with more than 1 path");
1240 			return false;
1241 		}
1242 		*is_fdb = nhi->fdb_nh;
1243 	}
1244 
1245 	return true;
1246 }
1247 
1248 static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
1249 				   struct netlink_ext_ack *extack)
1250 {
1251 	struct nh_info *nhi;
1252 
1253 	nhi = rtnl_dereference(nh->nh_info);
1254 
1255 	if (!nhi->fdb_nh) {
1256 		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
1257 		return -EINVAL;
1258 	}
1259 
1260 	if (*nh_family == AF_UNSPEC) {
1261 		*nh_family = nhi->family;
1262 	} else if (*nh_family != nhi->family) {
1263 		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
1264 		return -EINVAL;
1265 	}
1266 
1267 	return 0;
1268 }
1269 
1270 static int nh_check_attr_group(struct net *net,
1271 			       struct nlattr *tb[], size_t tb_size,
1272 			       u16 nh_grp_type, struct netlink_ext_ack *extack)
1273 {
1274 	unsigned int len = nla_len(tb[NHA_GROUP]);
1275 	struct nexthop_grp *nhg;
1276 	unsigned int i, j;
1277 
1278 	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
1279 		NL_SET_ERR_MSG(extack,
1280 			       "Invalid length for nexthop group attribute");
1281 		return -EINVAL;
1282 	}
1283 
1284 	/* convert len to number of nexthop ids */
1285 	len /= sizeof(*nhg);
1286 
1287 	nhg = nla_data(tb[NHA_GROUP]);
1288 	for (i = 0; i < len; ++i) {
1289 		if (nhg[i].resvd2) {
1290 			NL_SET_ERR_MSG(extack, "Reserved field in nexthop_grp must be 0");
1291 			return -EINVAL;
1292 		}
1293 		if (nexthop_grp_weight(&nhg[i]) == 0) {
1294 			/* 0xffff got passed in, representing weight of 0x10000,
1295 			 * which is too heavy.
1296 			 */
1297 			NL_SET_ERR_MSG(extack, "Invalid value for weight");
1298 			return -EINVAL;
1299 		}
1300 		for (j = i + 1; j < len; ++j) {
1301 			if (nhg[i].id == nhg[j].id) {
1302 				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
1303 				return -EINVAL;
1304 			}
1305 		}
1306 	}
1307 
1308 	nhg = nla_data(tb[NHA_GROUP]);
1309 	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
1310 		if (!tb[i])
1311 			continue;
1312 		switch (i) {
1313 		case NHA_HW_STATS_ENABLE:
1314 		case NHA_FDB:
1315 			continue;
1316 		case NHA_RES_GROUP:
1317 			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
1318 				continue;
1319 			break;
1320 		}
1321 		NL_SET_ERR_MSG(extack,
1322 			       "No other attributes can be set in nexthop groups");
1323 		return -EINVAL;
1324 	}
1325 
1326 	return 0;
1327 }
1328 
1329 static int nh_check_attr_group_rtnl(struct net *net, struct nlattr *tb[],
1330 				    struct netlink_ext_ack *extack)
1331 {
1332 	u8 nh_family = AF_UNSPEC;
1333 	struct nexthop_grp *nhg;
1334 	unsigned int len;
1335 	unsigned int i;
1336 	u8 nhg_fdb;
1337 
1338 	len = nla_len(tb[NHA_GROUP]) / sizeof(*nhg);
1339 	nhg = nla_data(tb[NHA_GROUP]);
1340 	nhg_fdb = !!tb[NHA_FDB];
1341 
1342 	for (i = 0; i < len; i++) {
1343 		struct nexthop *nh;
1344 		bool is_fdb_nh;
1345 
1346 		nh = nexthop_find_by_id(net, nhg[i].id);
1347 		if (!nh) {
1348 			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
1349 			return -EINVAL;
1350 		}
1351 		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
1352 			return -EINVAL;
1353 
1354 		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
1355 			return -EINVAL;
1356 
1357 		if (!nhg_fdb && is_fdb_nh) {
1358 			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
1359 			return -EINVAL;
1360 		}
1361 	}
1362 
1363 	return 0;
1364 }
1365 
1366 static bool ipv6_good_nh(const struct fib6_nh *nh)
1367 {
1368 	int state = NUD_REACHABLE;
1369 	struct neighbour *n;
1370 
1371 	rcu_read_lock();
1372 
1373 	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
1374 	if (n)
1375 		state = READ_ONCE(n->nud_state);
1376 
1377 	rcu_read_unlock();
1378 
1379 	return !!(state & NUD_VALID);
1380 }
1381 
1382 static bool ipv4_good_nh(const struct fib_nh *nh)
1383 {
1384 	int state = NUD_REACHABLE;
1385 	struct neighbour *n;
1386 
1387 	rcu_read_lock();
1388 
1389 	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
1390 				      (__force u32)nh->fib_nh_gw4);
1391 	if (n)
1392 		state = READ_ONCE(n->nud_state);
1393 
1394 	rcu_read_unlock();
1395 
1396 	return !!(state & NUD_VALID);
1397 }
1398 
1399 static bool nexthop_is_good_nh(const struct nexthop *nh)
1400 {
1401 	struct nh_info *nhi = rcu_dereference(nh->nh_info);
1402 
1403 	switch (nhi->family) {
1404 	case AF_INET:
1405 		return ipv4_good_nh(&nhi->fib_nh);
1406 	case AF_INET6:
1407 		return ipv6_good_nh(&nhi->fib6_nh);
1408 	}
1409 
1410 	return false;
1411 }
1412 
1413 static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
1414 {
1415 	int i;
1416 
1417 	for (i = 0; i < nhg->num_nh; i++) {
1418 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1419 
1420 		if (hash > atomic_read(&nhge->hthr.upper_bound))
1421 			continue;
1422 
1423 		nh_grp_entry_stats_inc(nhge);
1424 		return nhge->nh;
1425 	}
1426 
1427 	WARN_ON_ONCE(1);
1428 	return NULL;
1429 }
1430 
1431 static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
1432 {
1433 	struct nh_grp_entry *nhge0 = NULL;
1434 	int i;
1435 
1436 	if (nhg->fdb_nh)
1437 		return nexthop_select_path_fdb(nhg, hash);
1438 
1439 	for (i = 0; i < nhg->num_nh; ++i) {
1440 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1441 
1442 		/* A nexthop group always checks whether each nexthop is good; it
1443 		 * does not rely on a sysctl for this behavior
1444 		 */
1445 		if (!nexthop_is_good_nh(nhge->nh))
1446 			continue;
1447 
1448 		if (!nhge0)
1449 			nhge0 = nhge;
1450 
1451 		if (hash > atomic_read(&nhge->hthr.upper_bound))
1452 			continue;
1453 
1454 		nh_grp_entry_stats_inc(nhge);
1455 		return nhge->nh;
1456 	}
1457 
1458 	if (!nhge0)
1459 		nhge0 = &nhg->nh_entries[0];
1460 	nh_grp_entry_stats_inc(nhge0);
1461 	return nhge0->nh;
1462 }
1463 
1464 static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
1465 {
1466 	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
1467 	u16 bucket_index = hash % res_table->num_nh_buckets;
1468 	struct nh_res_bucket *bucket;
1469 	struct nh_grp_entry *nhge;
1470 
1471 	/* nexthop_select_path() is expected to return a non-NULL value, so
1472 	 * skip protocol validation and just hand out whatever there is.
1473 	 */
1474 	bucket = &res_table->nh_buckets[bucket_index];
1475 	nh_res_bucket_set_busy(bucket);
1476 	nhge = rcu_dereference(bucket->nh_entry);
1477 	nh_grp_entry_stats_inc(nhge);
1478 	return nhge->nh;
1479 }
1480 
1481 struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
1482 {
1483 	struct nh_group *nhg;
1484 
1485 	if (!nh->is_group)
1486 		return nh;
1487 
1488 	nhg = rcu_dereference(nh->nh_grp);
1489 	if (nhg->hash_threshold)
1490 		return nexthop_select_path_hthr(nhg, hash);
1491 	else if (nhg->resilient)
1492 		return nexthop_select_path_res(nhg, hash);
1493 
1494 	/* Unreachable. */
1495 	return NULL;
1496 }
1497 EXPORT_SYMBOL_GPL(nexthop_select_path);
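
/* Simplified caller-side sketch (assumed, based on the IPv4 multipath
 * code; fib_multipath_hash() is the real hash helper there):
 *
 *	int hash = fib_multipath_hash(net, fl4, skb, NULL);
 *	struct nexthop *nh2 = nexthop_select_path(nh, hash);
 *
 * Hash-threshold groups compare the hash against per-entry upper bounds;
 * resilient groups index a bucket with hash % num_nh_buckets.
 */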
1498 
1499 int nexthop_for_each_fib6_nh(struct nexthop *nh,
1500 			     int (*cb)(struct fib6_nh *nh, void *arg),
1501 			     void *arg)
1502 {
1503 	struct nh_info *nhi;
1504 	int err;
1505 
1506 	if (nh->is_group) {
1507 		struct nh_group *nhg;
1508 		int i;
1509 
1510 		nhg = rcu_dereference_rtnl(nh->nh_grp);
1511 		for (i = 0; i < nhg->num_nh; i++) {
1512 			struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1513 
1514 			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
1515 			err = cb(&nhi->fib6_nh, arg);
1516 			if (err)
1517 				return err;
1518 		}
1519 	} else {
1520 		nhi = rcu_dereference_rtnl(nh->nh_info);
1521 		err = cb(&nhi->fib6_nh, arg);
1522 		if (err)
1523 			return err;
1524 	}
1525 
1526 	return 0;
1527 }
1528 EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);
1529 
1530 static int check_src_addr(const struct in6_addr *saddr,
1531 			  struct netlink_ext_ack *extack)
1532 {
1533 	if (!ipv6_addr_any(saddr)) {
1534 		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
1535 		return -EINVAL;
1536 	}
1537 	return 0;
1538 }
1539 
1540 int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
1541 		       struct netlink_ext_ack *extack)
1542 {
1543 	struct nh_info *nhi;
1544 	bool is_fdb_nh;
1545 
1546 	/* fib6_src is unique to a fib6_info and limits the ability to cache
1547 	 * routes in fib6_nh within a nexthop that is potentially shared
1548 	 * across multiple fib entries. If the config wants to use source
1549 	 * routing it can not use nexthop objects. mlxsw also does not allow
1550 	 * fib6_src on routes.
1551 	 */
1552 	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
1553 		return -EINVAL;
1554 
1555 	if (nh->is_group) {
1556 		struct nh_group *nhg;
1557 
1558 		nhg = rcu_dereference_rtnl(nh->nh_grp);
1559 		if (nhg->has_v4)
1560 			goto no_v4_nh;
1561 		is_fdb_nh = nhg->fdb_nh;
1562 	} else {
1563 		nhi = rcu_dereference_rtnl(nh->nh_info);
1564 		if (nhi->family == AF_INET)
1565 			goto no_v4_nh;
1566 		is_fdb_nh = nhi->fdb_nh;
1567 	}
1568 
1569 	if (is_fdb_nh) {
1570 		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1571 		return -EINVAL;
1572 	}
1573 
1574 	return 0;
1575 no_v4_nh:
1576 	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
1577 	return -EINVAL;
1578 }
1579 EXPORT_SYMBOL_GPL(fib6_check_nexthop);
1580 
1581 /* If the existing nexthop has IPv6 routes linked to it, we need to
1582  * verify that the new spec works with IPv6
1583  */
1584 static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
1585 			      struct netlink_ext_ack *extack)
1586 {
1587 	struct fib6_info *f6i;
1588 
1589 	if (list_empty(&old->f6i_list))
1590 		return 0;
1591 
1592 	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
1593 		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
1594 			return -EINVAL;
1595 	}
1596 
1597 	return fib6_check_nexthop(new, NULL, extack);
1598 }
1599 
1600 static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
1601 			       struct netlink_ext_ack *extack)
1602 {
1603 	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
1604 		NL_SET_ERR_MSG(extack,
1605 			       "Route with host scope can not have a gateway");
1606 		return -EINVAL;
1607 	}
1608 
1609 	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
1610 		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
1611 		return -EINVAL;
1612 	}
1613 
1614 	return 0;
1615 }
1616 
1617 /* Invoked by the fib add code to verify that a nexthop referenced by id
1618  * is ok with the config for the prefix; covers the parts of fib_check_nh()
1619  * that are not done when a nexthop object is used.
1620  */
1621 int fib_check_nexthop(struct nexthop *nh, u8 scope,
1622 		      struct netlink_ext_ack *extack)
1623 {
1624 	struct nh_info *nhi;
1625 	int err = 0;
1626 
1627 	if (nh->is_group) {
1628 		struct nh_group *nhg;
1629 
1630 		nhg = rtnl_dereference(nh->nh_grp);
1631 		if (nhg->fdb_nh) {
1632 			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1633 			err = -EINVAL;
1634 			goto out;
1635 		}
1636 
1637 		if (scope == RT_SCOPE_HOST) {
1638 			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
1639 			err = -EINVAL;
1640 			goto out;
1641 		}
1642 
1643 		/* all nexthops in a group have the same scope */
1644 		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
1645 		err = nexthop_check_scope(nhi, scope, extack);
1646 	} else {
1647 		nhi = rtnl_dereference(nh->nh_info);
1648 		if (nhi->fdb_nh) {
1649 			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1650 			err = -EINVAL;
1651 			goto out;
1652 		}
1653 		err = nexthop_check_scope(nhi, scope, extack);
1654 	}
1655 
1656 out:
1657 	return err;
1658 }
1659 
1660 static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
1661 			     struct netlink_ext_ack *extack)
1662 {
1663 	struct fib_info *fi;
1664 
1665 	list_for_each_entry(fi, &old->fi_list, nh_list) {
1666 		int err;
1667 
1668 		err = fib_check_nexthop(new, fi->fib_scope, extack);
1669 		if (err)
1670 			return err;
1671 	}
1672 	return 0;
1673 }
1674 
1675 static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
1676 {
1677 	return nhge->res.count_buckets == nhge->res.wants_buckets;
1678 }
1679 
1680 static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
1681 {
1682 	return nhge->res.count_buckets > nhge->res.wants_buckets;
1683 }
1684 
1685 static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
1686 {
1687 	return nhge->res.count_buckets < nhge->res.wants_buckets;
1688 }
1689 
1690 static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
1691 {
1692 	return list_empty(&res_table->uw_nh_entries);
1693 }
1694 
1695 static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
1696 {
1697 	struct nh_grp_entry *nhge;
1698 
1699 	if (bucket->occupied) {
1700 		nhge = nh_res_dereference(bucket->nh_entry);
1701 		nhge->res.count_buckets--;
1702 		bucket->occupied = false;
1703 	}
1704 }
1705 
1706 static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
1707 				 struct nh_grp_entry *nhge)
1708 {
1709 	nh_res_bucket_unset_nh(bucket);
1710 
1711 	bucket->occupied = true;
1712 	rcu_assign_pointer(bucket->nh_entry, nhge);
1713 	nhge->res.count_buckets++;
1714 }
1715 
1716 static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
1717 					 struct nh_res_bucket *bucket,
1718 					 unsigned long *deadline, bool *force)
1719 {
1720 	unsigned long now = jiffies;
1721 	struct nh_grp_entry *nhge;
1722 	unsigned long idle_point;
1723 
1724 	if (!bucket->occupied) {
1725 		/* The bucket is not occupied, its NHGE pointer is either
1726 		 * NULL or obsolete. We _have to_ migrate: set force.
1727 		 */
1728 		*force = true;
1729 		return true;
1730 	}
1731 
1732 	nhge = nh_res_dereference(bucket->nh_entry);
1733 
1734 	/* If the bucket is populated by an underweight or balanced
1735 	 * nexthop, do not migrate.
1736 	 */
1737 	if (!nh_res_nhge_is_ow(nhge))
1738 		return false;
1739 
1740 	/* At this point we know that the bucket is populated with an
1741 	 * overweight nexthop. It needs to be migrated to a new nexthop if
1742 	 * the idle timer or the unbalanced timer expired.
1743 	 */
1744 
1745 	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
1746 	if (time_after_eq(now, idle_point)) {
1747 		/* The bucket is idle. We _can_ migrate: unset force. */
1748 		*force = false;
1749 		return true;
1750 	}
1751 
1752 	/* Unbalanced timer of 0 means "never force". */
1753 	if (res_table->unbalanced_timer) {
1754 		unsigned long unb_point;
1755 
1756 		unb_point = nh_res_table_unb_point(res_table);
1757 		if (time_after(now, unb_point)) {
1758 			/* The bucket is not idle, but the unbalanced timer
1759 			 * expired. We _can_ migrate, but set force anyway,
1760 			 * so that drivers know to ignore activity reports
1761 			 * from the HW.
1762 			 */
1763 			*force = true;
1764 			return true;
1765 		}
1766 
1767 		nh_res_time_set_deadline(unb_point, deadline);
1768 	}
1769 
1770 	nh_res_time_set_deadline(idle_point, deadline);
1771 	return false;
1772 }
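
/* Summary of the decision above (derived from the logic in this function):
 *
 *	bucket state					migrate	force
 *	not occupied					yes	yes
 *	occupied, entry not overweight			no	-
 *	occupied, overweight, idle timer expired	yes	no
 *	occupied, overweight, unbalanced timer expired	yes	yes
 *	occupied, overweight, neither expired		no	-
 */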
1773 
1774 static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
1775 				  u16 bucket_index, bool notify,
1776 				  bool notify_nl, bool force)
1777 {
1778 	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
1779 	struct nh_grp_entry *new_nhge;
1780 	struct netlink_ext_ack extack;
1781 	int err;
1782 
1783 	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
1784 					    struct nh_grp_entry,
1785 					    res.uw_nh_entry);
1786 	if (WARN_ON_ONCE(!new_nhge))
1787 		/* If this function is called, "bucket" is either not
1788 		 * occupied, or it belongs to a next hop that is
1789 		 * overweight. In either case, there ought to be a
1790 		 * corresponding underweight next hop.
1791 		 */
1792 		return false;
1793 
1794 	if (notify) {
1795 		struct nh_grp_entry *old_nhge;
1796 
1797 		old_nhge = nh_res_dereference(bucket->nh_entry);
1798 		err = call_nexthop_res_bucket_notifiers(res_table->net,
1799 							res_table->nhg_id,
1800 							bucket_index, force,
1801 							old_nhge->nh,
1802 							new_nhge->nh, &extack);
1803 		if (err) {
1804 			pr_err_ratelimited("%s\n", extack._msg);
1805 			if (!force)
1806 				return false;
1807 			/* It is not possible to veto a forced replacement, so
1808 			 * just clear the hardware flags from the nexthop
1809 			 * bucket to indicate to user space that this bucket is
1810 			 * not correctly populated in hardware.
1811 			 */
1812 			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
1813 		}
1814 	}
1815 
1816 	nh_res_bucket_set_nh(bucket, new_nhge);
1817 	nh_res_bucket_set_idle(res_table, bucket);
1818 
1819 	if (notify_nl)
1820 		nexthop_bucket_notify(res_table, bucket_index);
1821 
1822 	if (nh_res_nhge_is_balanced(new_nhge))
1823 		list_del(&new_nhge->res.uw_nh_entry);
1824 	return true;
1825 }
1826 
1827 #define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)
1828 
1829 static void nh_res_table_upkeep(struct nh_res_table *res_table,
1830 				bool notify, bool notify_nl)
1831 {
1832 	unsigned long now = jiffies;
1833 	unsigned long deadline;
1834 	u16 i;
1835 
1836 	/* Deadline is the next time that upkeep should be run. It is the
1837 	 * earliest time at which one of the buckets might be migrated.
1838 	 * Start at the most pessimistic estimate: either unbalanced_timer
1839 	 * from now, or if there is none, idle_timer from now. For each
1840 	 * encountered time point, call nh_res_time_set_deadline() to
1841 	 * refine the estimate.
1842 	 */
1843 	if (res_table->unbalanced_timer)
1844 		deadline = now + res_table->unbalanced_timer;
1845 	else
1846 		deadline = now + res_table->idle_timer;
1847 
1848 	for (i = 0; i < res_table->num_nh_buckets; i++) {
1849 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1850 		bool force;
1851 
1852 		if (nh_res_bucket_should_migrate(res_table, bucket,
1853 						 &deadline, &force)) {
1854 			if (!nh_res_bucket_migrate(res_table, i, notify,
1855 						   notify_nl, force)) {
1856 				unsigned long idle_point;
1857 
1858 				/* A driver can override the migration
1859 				 * decision if the HW reports that the
1860 				 * bucket is actually not idle. Therefore
1861 				 * re-mark the bucket as busy again and
1862 				 * update the deadline.
1863 				 */
1864 				nh_res_bucket_set_busy(bucket);
1865 				idle_point = nh_res_bucket_idle_point(res_table,
1866 								      bucket,
1867 								      now);
1868 				nh_res_time_set_deadline(idle_point, &deadline);
1869 			}
1870 		}
1871 	}
1872 
1873 	/* If the group is still unbalanced, schedule the next upkeep to
1874 	 * either the deadline computed above, or the minimum deadline,
1875 	 * whichever comes later.
1876 	 */
1877 	if (!nh_res_table_is_balanced(res_table)) {
1878 		unsigned long now = jiffies;
1879 		unsigned long min_deadline;
1880 
1881 		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
1882 		if (time_before(deadline, min_deadline))
1883 			deadline = min_deadline;
1884 
1885 		queue_delayed_work(system_power_efficient_wq,
1886 				   &res_table->upkeep_dw, deadline - now);
1887 	}
1888 }
1889 
1890 static void nh_res_table_upkeep_dw(struct work_struct *work)
1891 {
1892 	struct delayed_work *dw = to_delayed_work(work);
1893 	struct nh_res_table *res_table;
1894 
1895 	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
1896 	nh_res_table_upkeep(res_table, true, true);
1897 }
1898 
1899 static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
1900 {
1901 	cancel_delayed_work_sync(&res_table->upkeep_dw);
1902 }
1903 
1904 static void nh_res_group_rebalance(struct nh_group *nhg,
1905 				   struct nh_res_table *res_table)
1906 {
1907 	u16 prev_upper_bound = 0;
1908 	u32 total = 0;
1909 	u32 w = 0;
1910 	int i;
1911 
1912 	INIT_LIST_HEAD(&res_table->uw_nh_entries);
1913 
1914 	for (i = 0; i < nhg->num_nh; ++i)
1915 		total += nhg->nh_entries[i].weight;
1916 
1917 	for (i = 0; i < nhg->num_nh; ++i) {
1918 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1919 		u16 upper_bound;
1920 		u64 btw;
1921 
1922 		w += nhge->weight;
1923 		btw = ((u64)res_table->num_nh_buckets) * w;
1924 		upper_bound = DIV_ROUND_CLOSEST_ULL(btw, total);
1925 		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
1926 		prev_upper_bound = upper_bound;
1927 
1928 		if (nh_res_nhge_is_uw(nhge)) {
1929 			if (list_empty(&res_table->uw_nh_entries))
1930 				res_table->unbalanced_since = jiffies;
1931 			list_add(&nhge->res.uw_nh_entry,
1932 				 &res_table->uw_nh_entries);
1933 		}
1934 	}
1935 }
1936 
1937 /* Migrate buckets in res_table so that they reference the NHGEs from NHG
1938  * with matching NH IDs. Mark buckets that have no corresponding NHGE
1939  * entry in NHG as not occupied.
1940  */
1941 static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1942 					 struct nh_group *nhg)
1943 {
1944 	u16 i;
1945 
1946 	for (i = 0; i < res_table->num_nh_buckets; i++) {
1947 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1948 		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1949 		bool found = false;
1950 		int j;
1951 
1952 		for (j = 0; j < nhg->num_nh; j++) {
1953 			struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1954 
1955 			if (nhge->nh->id == id) {
1956 				nh_res_bucket_set_nh(bucket, nhge);
1957 				found = true;
1958 				break;
1959 			}
1960 		}
1961 
1962 		if (!found)
1963 			nh_res_bucket_unset_nh(bucket);
1964 	}
1965 }
1966 
1967 static void replace_nexthop_grp_res(struct nh_group *oldg,
1968 				    struct nh_group *newg)
1969 {
1970 	/* For NH group replacement, the new NHG might only have a stub
1971 	 * hash table with 0 buckets, because the number of buckets was not
1972 	 * specified. For NH removal, oldg and newg both reference the same
1973 	 * res_table. In either case, the code below must therefore work
1974 	 * with oldg->res_table.
1975 	 */
1976 	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1977 	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1978 	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1979 
1980 	nh_res_table_cancel_upkeep(old_res_table);
1981 	nh_res_table_migrate_buckets(old_res_table, newg);
1982 	nh_res_group_rebalance(newg, old_res_table);
1983 	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1984 		old_res_table->unbalanced_since = prev_unbalanced_since;
1985 	nh_res_table_upkeep(old_res_table, true, false);
1986 }
1987 
1988 static void nh_hthr_group_rebalance(struct nh_group *nhg)
1989 {
1990 	u32 total = 0;
1991 	u32 w = 0;
1992 	int i;
1993 
1994 	for (i = 0; i < nhg->num_nh; ++i)
1995 		total += nhg->nh_entries[i].weight;
1996 
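	/* Scale the cumulative weight fraction of each entry to the 31-bit
	 * hash space. E.g. weights 1 and 3 yield upper bounds of
	 * 2^31 / 4 - 1 and 2^31 - 1, so a flow hash selects the first entry
	 * with probability 1/4.
	 */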
1997 	for (i = 0; i < nhg->num_nh; ++i) {
1998 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1999 		u32 upper_bound;
2000 
2001 		w += nhge->weight;
2002 		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
2003 		atomic_set(&nhge->hthr.upper_bound, upper_bound);
2004 	}
2005 }
2006 
2007 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
2008 				struct nl_info *nlinfo)
2009 {
2010 	struct nh_grp_entry *nhges, *new_nhges;
2011 	struct nexthop *nhp = nhge->nh_parent;
2012 	struct netlink_ext_ack extack;
2013 	struct nexthop *nh = nhge->nh;
2014 	struct nh_group *nhg, *newg;
2015 	int i, j, err;
2016 
2017 	WARN_ON(!nh);
2018 
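	/* Each group keeps a pre-allocated "spare" twin (see
	 * nexthop_create_group()). Building the shrunken group in the spare
	 * lets the removal path publish it with a single RCU pointer swap,
	 * without allocating memory while holding rtnl.
	 */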
2019 	nhg = rtnl_dereference(nhp->nh_grp);
2020 	newg = nhg->spare;
2021 
2022 	/* last entry, keep it visible and remove the parent */
2023 	if (nhg->num_nh == 1) {
2024 		remove_nexthop(net, nhp, nlinfo);
2025 		return;
2026 	}
2027 
2028 	newg->has_v4 = false;
2029 	newg->is_multipath = nhg->is_multipath;
2030 	newg->hash_threshold = nhg->hash_threshold;
2031 	newg->resilient = nhg->resilient;
2032 	newg->fdb_nh = nhg->fdb_nh;
2033 	newg->num_nh = nhg->num_nh;
2034 
2035 	/* copy old entries to new except the one getting removed */
2036 	nhges = nhg->nh_entries;
2037 	new_nhges = newg->nh_entries;
2038 	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
2039 		struct nh_info *nhi;
2040 
2041 		/* current nexthop getting removed */
2042 		if (nhg->nh_entries[i].nh == nh) {
2043 			newg->num_nh--;
2044 			continue;
2045 		}
2046 
2047 		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2048 		if (nhi->family == AF_INET)
2049 			newg->has_v4 = true;
2050 
2051 		list_del(&nhges[i].nh_list);
2052 		new_nhges[j].stats = nhges[i].stats;
2053 		new_nhges[j].nh_parent = nhges[i].nh_parent;
2054 		new_nhges[j].nh = nhges[i].nh;
2055 		new_nhges[j].weight = nhges[i].weight;
2056 		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
2057 		j++;
2058 	}
2059 
2060 	if (newg->hash_threshold)
2061 		nh_hthr_group_rebalance(newg);
2062 	else if (newg->resilient)
2063 		replace_nexthop_grp_res(nhg, newg);
2064 
2065 	rcu_assign_pointer(nhp->nh_grp, newg);
2066 
2067 	list_del(&nhge->nh_list);
2068 	free_percpu(nhge->stats);
2069 	nexthop_put(nhge->nh);
2070 
2071 	/* Removal of a NH from a resilient group is notified through
2072 	 * bucket notifications.
2073 	 */
2074 	if (newg->hash_threshold) {
2075 		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
2076 					     &extack);
2077 		if (err)
2078 			pr_err("%s\n", extack._msg);
2079 	}
2080 
2081 	if (nlinfo)
2082 		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
2083 }
2084 
2085 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
2086 				       struct nl_info *nlinfo)
2087 {
2088 	struct nh_grp_entry *nhge, *tmp;
2089 
2090 	/* If there is nothing to do, let's avoid the costly call to
2091 	 * synchronize_net()
2092 	 */
2093 	if (list_empty(&nh->grp_list))
2094 		return;
2095 
2096 	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
2097 		remove_nh_grp_entry(net, nhge, nlinfo);
2098 
2099 	/* make sure all see the newly published array before releasing rtnl */
2100 	synchronize_net();
2101 }
2102 
2103 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
2104 {
2105 	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
2106 	struct nh_res_table *res_table;
2107 	int i, num_nh = nhg->num_nh;
2108 
2109 	for (i = 0; i < num_nh; ++i) {
2110 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2111 
2112 		if (WARN_ON(!nhge->nh))
2113 			continue;
2114 
2115 		list_del_init(&nhge->nh_list);
2116 	}
2117 
2118 	if (nhg->resilient) {
2119 		res_table = rtnl_dereference(nhg->res_table);
2120 		nh_res_table_cancel_upkeep(res_table);
2121 	}
2122 }
2123 
2124 /* not called for nexthop replace */
2125 static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
2126 {
2127 	struct fib6_info *f6i;
2128 	bool do_flush = false;
2129 	struct fib_info *fi;
2130 
2131 	list_for_each_entry(fi, &nh->fi_list, nh_list) {
2132 		fi->fib_flags |= RTNH_F_DEAD;
2133 		do_flush = true;
2134 	}
2135 	if (do_flush)
2136 		fib_flush(net);
2137 
2138 	spin_lock_bh(&nh->lock);
2139 
2140 	nh->dead = true;
2141 
2142 	while (!list_empty(&nh->f6i_list)) {
2143 		f6i = list_first_entry(&nh->f6i_list, typeof(*f6i), nh_list);
2144 
2145 		/* __ip6_del_rt does a release, so do a hold here */
2146 		fib6_info_hold(f6i);
2147 
2148 		spin_unlock_bh(&nh->lock);
2149 		ipv6_stub->ip6_del_rt(net, f6i,
2150 				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
2151 
2152 		spin_lock_bh(&nh->lock);
2153 	}
2154 
2155 	spin_unlock_bh(&nh->lock);
2156 }
2157 
2158 static void __remove_nexthop(struct net *net, struct nexthop *nh,
2159 			     struct nl_info *nlinfo)
2160 {
2161 	__remove_nexthop_fib(net, nh);
2162 
2163 	if (nh->is_group) {
2164 		remove_nexthop_group(nh, nlinfo);
2165 	} else {
2166 		struct nh_info *nhi;
2167 
2168 		nhi = rtnl_dereference(nh->nh_info);
2169 		if (nhi->fib_nhc.nhc_dev)
2170 			hlist_del(&nhi->dev_hash);
2171 
2172 		remove_nexthop_from_groups(net, nh, nlinfo);
2173 	}
2174 }
2175 
2176 static void remove_nexthop(struct net *net, struct nexthop *nh,
2177 			   struct nl_info *nlinfo)
2178 {
2179 	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
2180 
2181 	/* remove from the tree */
2182 	rb_erase(&nh->rb_node, &net->nexthop.rb_root);
2183 
2184 	if (nlinfo)
2185 		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
2186 
2187 	__remove_nexthop(net, nh, nlinfo);
2188 	nh_base_seq_inc(net);
2189 
2190 	nexthop_put(nh);
2191 }
2192 
2193 /* if any FIB entries reference this nexthop, any dst entries
2194  * need to be regenerated
2195  */
2196 static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
2197 			      struct nexthop *replaced_nh)
2198 {
2199 	struct fib6_info *f6i;
2200 	struct nh_group *nhg;
2201 	int i;
2202 
2203 	if (!list_empty(&nh->fi_list))
2204 		rt_cache_flush(net);
2205 
2206 	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2207 		ipv6_stub->fib6_update_sernum(net, f6i);
2208 
2209 	/* if a group was replaced, release the dsts cached by its IPv6
2210 	 * nexthops so that all refcounts are dropped
2211 	 */
2212 	if (!replaced_nh->is_group)
2213 		return;
2214 
2215 	nhg = rtnl_dereference(replaced_nh->nh_grp);
2216 	for (i = 0; i < nhg->num_nh; i++) {
2217 		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2218 		struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
2219 
2220 		if (nhi->family == AF_INET6)
2221 			ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
2222 	}
2223 }
2224 
2225 static int replace_nexthop_grp(struct net *net, struct nexthop *old,
2226 			       struct nexthop *new, const struct nh_config *cfg,
2227 			       struct netlink_ext_ack *extack)
2228 {
2229 	struct nh_res_table *tmp_table = NULL;
2230 	struct nh_res_table *new_res_table;
2231 	struct nh_res_table *old_res_table;
2232 	struct nh_group *oldg, *newg;
2233 	int i, err;
2234 
2235 	if (!new->is_group) {
2236 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
2237 		return -EINVAL;
2238 	}
2239 
2240 	oldg = rtnl_dereference(old->nh_grp);
2241 	newg = rtnl_dereference(new->nh_grp);
2242 
2243 	if (newg->hash_threshold != oldg->hash_threshold) {
2244 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
2245 		return -EINVAL;
2246 	}
2247 
2248 	if (newg->hash_threshold) {
2249 		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
2250 					     extack);
2251 		if (err)
2252 			return err;
2253 	} else if (newg->resilient) {
2254 		new_res_table = rtnl_dereference(newg->res_table);
2255 		old_res_table = rtnl_dereference(oldg->res_table);
2256 
2257 		/* Accept if num_nh_buckets was not given, but if it was
2258 		 * given, demand that the value be correct.
2259 		 */
2260 		if (cfg->nh_grp_res_has_num_buckets &&
2261 		    cfg->nh_grp_res_num_buckets !=
2262 		    old_res_table->num_nh_buckets) {
2263 			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
2264 			return -EINVAL;
2265 		}
2266 
2267 		/* Emit a pre-replace notification so that listeners can veto
2268 		 * a potentially unsupported configuration. Otherwise the
2269 		 * veto would have to happen through the individual bucket
2270 		 * replacement notifications, which should only be vetoed if
2271 		 * the bucket is currently active.
2272 		 */
2273 		err = call_nexthop_res_table_notifiers(net, new, extack);
2274 		if (err)
2275 			return err;
2276 
2277 		if (cfg->nh_grp_res_has_idle_timer)
2278 			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
2279 		if (cfg->nh_grp_res_has_unbalanced_timer)
2280 			old_res_table->unbalanced_timer =
2281 				cfg->nh_grp_res_unbalanced_timer;
2282 
2283 		replace_nexthop_grp_res(oldg, newg);
2284 
2285 		tmp_table = new_res_table;
2286 		rcu_assign_pointer(newg->res_table, old_res_table);
2287 		rcu_assign_pointer(newg->spare->res_table, old_res_table);
2288 	}
2289 
2290 	/* update parents - used by nexthop code for cleanup */
2291 	for (i = 0; i < newg->num_nh; i++)
2292 		newg->nh_entries[i].nh_parent = old;
2293 
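	/* 'old' and 'new' swap their groups: 'old' keeps its ID and tree
	 * position but now publishes newg, while oldg is handed to 'new',
	 * which the caller subsequently tears down (see replace_nexthop()).
	 */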
2294 	rcu_assign_pointer(old->nh_grp, newg);
2295 
2296 	/* Make sure concurrent readers are not using 'oldg' anymore. */
2297 	synchronize_net();
2298 
2299 	if (newg->resilient) {
2300 		rcu_assign_pointer(oldg->res_table, tmp_table);
2301 		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
2302 	}
2303 
2304 	for (i = 0; i < oldg->num_nh; i++)
2305 		oldg->nh_entries[i].nh_parent = new;
2306 
2307 	rcu_assign_pointer(new->nh_grp, oldg);
2308 
2309 	return 0;
2310 }
2311 
2312 static void nh_group_v4_update(struct nh_group *nhg)
2313 {
2314 	struct nh_grp_entry *nhges;
2315 	bool has_v4 = false;
2316 	int i;
2317 
2318 	nhges = nhg->nh_entries;
2319 	for (i = 0; i < nhg->num_nh; i++) {
2320 		struct nh_info *nhi;
2321 
2322 		nhi = rtnl_dereference(nhges[i].nh->nh_info);
2323 		if (nhi->family == AF_INET)
2324 			has_v4 = true;
2325 	}
2326 	nhg->has_v4 = has_v4;
2327 }
2328 
2329 static int replace_nexthop_single_notify_res(struct net *net,
2330 					     struct nh_res_table *res_table,
2331 					     struct nexthop *old,
2332 					     struct nh_info *oldi,
2333 					     struct nh_info *newi,
2334 					     struct netlink_ext_ack *extack)
2335 {
2336 	u32 nhg_id = res_table->nhg_id;
2337 	int err;
2338 	u16 i;
2339 
2340 	for (i = 0; i < res_table->num_nh_buckets; i++) {
2341 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2342 		struct nh_grp_entry *nhge;
2343 
2344 		nhge = rtnl_dereference(bucket->nh_entry);
2345 		if (nhge->nh == old) {
2346 			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
2347 								  i, true,
2348 								  oldi, newi,
2349 								  extack);
2350 			if (err)
2351 				goto err_notify;
2352 		}
2353 	}
2354 
2355 	return 0;
2356 
2357 err_notify:
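	/* Roll back: re-notify the buckets that were already switched, with
	 * oldi and newi swapped, so listeners see the original assignment
	 * again.
	 */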
2358 	while (i-- > 0) {
2359 		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2360 		struct nh_grp_entry *nhge;
2361 
2362 		nhge = rtnl_dereference(bucket->nh_entry);
2363 		if (nhge->nh == old)
2364 			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
2365 							    true, newi, oldi,
2366 							    extack);
2367 	}
2368 	return err;
2369 }
2370 
2371 static int replace_nexthop_single_notify(struct net *net,
2372 					 struct nexthop *group_nh,
2373 					 struct nexthop *old,
2374 					 struct nh_info *oldi,
2375 					 struct nh_info *newi,
2376 					 struct netlink_ext_ack *extack)
2377 {
2378 	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
2379 	struct nh_res_table *res_table;
2380 
2381 	if (nhg->hash_threshold) {
2382 		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
2383 					      group_nh, extack);
2384 	} else if (nhg->resilient) {
2385 		res_table = rtnl_dereference(nhg->res_table);
2386 		return replace_nexthop_single_notify_res(net, res_table,
2387 							 old, oldi, newi,
2388 							 extack);
2389 	}
2390 
2391 	return -EINVAL;
2392 }
2393 
2394 static int replace_nexthop_single(struct net *net, struct nexthop *old,
2395 				  struct nexthop *new,
2396 				  struct netlink_ext_ack *extack)
2397 {
2398 	u8 old_protocol, old_nh_flags;
2399 	struct nh_info *oldi, *newi;
2400 	struct nh_grp_entry *nhge;
2401 	int err;
2402 
2403 	if (new->is_group) {
2404 		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
2405 		return -EINVAL;
2406 	}
2407 
2408 	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
2409 	if (err)
2410 		return err;
2411 
2412 	/* Hardware flags were set on 'old' as 'new' is not in the red-black
2413 	 * tree. Therefore, inherit the flags from 'old' to 'new'.
2414 	 */
2415 	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
2416 
2417 	oldi = rtnl_dereference(old->nh_info);
2418 	newi = rtnl_dereference(new->nh_info);
2419 
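	/* The nh_info payloads are swapped below: 'old' keeps its ID and
	 * tree position but takes over 'new's data, while 'new' ends up
	 * with the old data and is freed by the caller (see
	 * replace_nexthop()). protocol and flags are saved so the swap can
	 * be unwound if a notifier rejects it.
	 */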
2420 	newi->nh_parent = old;
2421 	oldi->nh_parent = new;
2422 
2423 	old_protocol = old->protocol;
2424 	old_nh_flags = old->nh_flags;
2425 
2426 	old->protocol = new->protocol;
2427 	old->nh_flags = new->nh_flags;
2428 
2429 	rcu_assign_pointer(old->nh_info, newi);
2430 	rcu_assign_pointer(new->nh_info, oldi);
2431 
2432 	/* Send a replace notification for all the groups using the nexthop. */
2433 	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2434 		struct nexthop *nhp = nhge->nh_parent;
2435 
2436 		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
2437 						    extack);
2438 		if (err)
2439 			goto err_notify;
2440 	}
2441 
2442 	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
2443 	 * update IPv4 indication in all the groups using the nexthop.
2444 	 */
2445 	if (oldi->family == AF_INET && newi->family == AF_INET6) {
2446 		list_for_each_entry(nhge, &old->grp_list, nh_list) {
2447 			struct nexthop *nhp = nhge->nh_parent;
2448 			struct nh_group *nhg;
2449 
2450 			nhg = rtnl_dereference(nhp->nh_grp);
2451 			nh_group_v4_update(nhg);
2452 		}
2453 	}
2454 
2455 	return 0;
2456 
2457 err_notify:
2458 	rcu_assign_pointer(new->nh_info, newi);
2459 	rcu_assign_pointer(old->nh_info, oldi);
2460 	old->nh_flags = old_nh_flags;
2461 	old->protocol = old_protocol;
2462 	oldi->nh_parent = old;
2463 	newi->nh_parent = new;
2464 	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
2465 		struct nexthop *nhp = nhge->nh_parent;
2466 
2467 		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
2468 	}
2469 	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
2470 	return err;
2471 }
2472 
2473 static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
2474 				     struct nl_info *info)
2475 {
2476 	struct fib6_info *f6i;
2477 
2478 	if (!list_empty(&nh->fi_list)) {
2479 		struct fib_info *fi;
2480 
2481 		/* The expectation is a few fib_info entries per nexthop and
2482 		 * then a lot of routes per fib_info. So mark the fib_infos
2483 		 * and then walk the fib tables once.
2484 		 */
2485 		list_for_each_entry(fi, &nh->fi_list, nh_list)
2486 			fi->nh_updated = true;
2487 
2488 		fib_info_notify_update(net, info);
2489 
2490 		list_for_each_entry(fi, &nh->fi_list, nh_list)
2491 			fi->nh_updated = false;
2492 	}
2493 
2494 	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2495 		ipv6_stub->fib6_rt_update(net, f6i, info);
2496 }
2497 
2498 /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2499  * linked to this nexthop and for all groups that the nexthop
2500  * is a member of
2501  */
2502 static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2503 				   struct nl_info *info)
2504 {
2505 	struct nh_grp_entry *nhge;
2506 
2507 	__nexthop_replace_notify(net, nh, info);
2508 
2509 	list_for_each_entry(nhge, &nh->grp_list, nh_list)
2510 		__nexthop_replace_notify(net, nhge->nh_parent, info);
2511 }
2512 
2513 static int replace_nexthop(struct net *net, struct nexthop *old,
2514 			   struct nexthop *new, const struct nh_config *cfg,
2515 			   struct netlink_ext_ack *extack)
2516 {
2517 	bool new_is_reject = false;
2518 	struct nh_grp_entry *nhge;
2519 	int err;
2520 
2521 	/* check that existing FIB entries are ok with the
2522 	 * new nexthop definition
2523 	 */
2524 	err = fib_check_nh_list(old, new, extack);
2525 	if (err)
2526 		return err;
2527 
2528 	err = fib6_check_nh_list(old, new, extack);
2529 	if (err)
2530 		return err;
2531 
2532 	if (!new->is_group) {
2533 		struct nh_info *nhi = rtnl_dereference(new->nh_info);
2534 
2535 		new_is_reject = nhi->reject_nh;
2536 	}
2537 
2538 	list_for_each_entry(nhge, &old->grp_list, nh_list) {
2539 		/* if new nexthop is a blackhole, any groups using this
2540 		 * nexthop cannot have more than 1 path
2541 		 */
2542 		if (new_is_reject &&
2543 		    nexthop_num_path(nhge->nh_parent) > 1) {
2544 			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2545 			return -EINVAL;
2546 		}
2547 
2548 		err = fib_check_nh_list(nhge->nh_parent, new, extack);
2549 		if (err)
2550 			return err;
2551 
2552 		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2553 		if (err)
2554 			return err;
2555 	}
2556 
2557 	if (old->is_group)
2558 		err = replace_nexthop_grp(net, old, new, cfg, extack);
2559 	else
2560 		err = replace_nexthop_single(net, old, new, extack);
2561 
2562 	if (!err) {
2563 		nh_rt_cache_flush(net, old, new);
2564 
2565 		__remove_nexthop(net, new, NULL);
2566 		nexthop_put(new);
2567 	}
2568 
2569 	return err;
2570 }
2571 
2572 /* called with rtnl_lock held */
2573 static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2574 			  struct nh_config *cfg, struct netlink_ext_ack *extack)
2575 {
2576 	struct rb_node **pp, *parent = NULL, *next;
2577 	struct rb_root *root = &net->nexthop.rb_root;
2578 	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2579 	bool create = !!(cfg->nlflags & NLM_F_CREATE);
2580 	u32 new_id = new_nh->id;
2581 	int replace_notify = 0;
2582 	int rc = -EEXIST;
2583 
2584 	pp = &root->rb_node;
2585 	while (1) {
2586 		struct nexthop *nh;
2587 
2588 		next = *pp;
2589 		if (!next)
2590 			break;
2591 
2592 		parent = next;
2593 
2594 		nh = rb_entry(parent, struct nexthop, rb_node);
2595 		if (new_id < nh->id) {
2596 			pp = &next->rb_left;
2597 		} else if (new_id > nh->id) {
2598 			pp = &next->rb_right;
2599 		} else if (replace) {
2600 			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2601 			if (!rc) {
2602 				new_nh = nh; /* send notification with old nh */
2603 				replace_notify = 1;
2604 			}
2605 			goto out;
2606 		} else {
2607 			/* id already exists and not a replace */
2608 			goto out;
2609 		}
2610 	}
2611 
2612 	if (replace && !create) {
2613 		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2614 		rc = -ENOENT;
2615 		goto out;
2616 	}
2617 
2618 	if (new_nh->is_group) {
2619 		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2620 		struct nh_res_table *res_table;
2621 
2622 		if (nhg->resilient) {
2623 			res_table = rtnl_dereference(nhg->res_table);
2624 
2625 			/* Not passing the number of buckets is OK when
2626 			 * replacing, but not when creating a new group.
2627 			 */
2628 			if (!cfg->nh_grp_res_has_num_buckets) {
2629 				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2630 				rc = -EINVAL;
2631 				goto out;
2632 			}
2633 
2634 			nh_res_group_rebalance(nhg, res_table);
2635 
2636 			/* Do not send bucket notifications; a full
2637 			 * notification is sent below.
2638 			 */
2639 			nh_res_table_upkeep(res_table, false, false);
2640 		}
2641 	}
2642 
2643 	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2644 	rb_insert_color(&new_nh->rb_node, root);
2645 
2646 	/* The initial insertion is a full notification for hash-threshold as
2647 	 * well as resilient groups.
2648 	 */
2649 	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2650 	if (rc)
2651 		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2652 
2653 out:
2654 	if (!rc) {
2655 		nh_base_seq_inc(net);
2656 		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2657 		if (replace_notify &&
2658 		    READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
2659 			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2660 	}
2661 
2662 	return rc;
2663 }
2664 
2665 /* rtnl */
2666 /* remove all nexthops tied to a device going down or being deleted */
2667 static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2668 {
2669 	unsigned int hash = nh_dev_hashfn(dev->ifindex);
2670 	struct net *net = dev_net(dev);
2671 	struct hlist_head *head = &net->nexthop.devhash[hash];
2672 	struct hlist_node *n;
2673 	struct nh_info *nhi;
2674 
2675 	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2676 		if (nhi->fib_nhc.nhc_dev != dev)
2677 			continue;
2678 
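		/* Blackhole nexthops are bound to the loopback device (see
		 * nexthop_create()) and must survive it going down; only an
		 * unregister removes them.
		 */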
2679 		if (nhi->reject_nh &&
2680 		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2681 			continue;
2682 
2683 		remove_nexthop(net, nhi->nh_parent, NULL);
2684 	}
2685 }
2686 
2687 /* rtnl; called when net namespace is deleted */
2688 static void flush_all_nexthops(struct net *net)
2689 {
2690 	struct rb_root *root = &net->nexthop.rb_root;
2691 	struct rb_node *node;
2692 	struct nexthop *nh;
2693 
2694 	while ((node = rb_first(root))) {
2695 		nh = rb_entry(node, struct nexthop, rb_node);
2696 		remove_nexthop(net, nh, NULL);
2697 		cond_resched();
2698 	}
2699 }
2700 
2701 static struct nexthop *nexthop_create_group(struct net *net,
2702 					    struct nh_config *cfg)
2703 {
2704 	struct nlattr *grps_attr = cfg->nh_grp;
2705 	struct nexthop_grp *entry = nla_data(grps_attr);
2706 	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2707 	struct nh_group *nhg;
2708 	struct nexthop *nh;
2709 	int err;
2710 	int i;
2711 
2712 	nh = nexthop_alloc();
2713 	if (!nh)
2714 		return ERR_PTR(-ENOMEM);
2715 
2716 	nh->is_group = 1;
2717 
2718 	nhg = nexthop_grp_alloc(num_nh);
2719 	if (!nhg) {
2720 		kfree(nh);
2721 		return ERR_PTR(-ENOMEM);
2722 	}
2723 
2724 	/* spare group used for removals */
2725 	nhg->spare = nexthop_grp_alloc(num_nh);
2726 	if (!nhg->spare) {
2727 		kfree(nhg);
2728 		kfree(nh);
2729 		return ERR_PTR(-ENOMEM);
2730 	}
2731 	nhg->spare->spare = nhg;
2732 
2733 	for (i = 0; i < nhg->num_nh; ++i) {
2734 		struct nexthop *nhe;
2735 		struct nh_info *nhi;
2736 
2737 		nhe = nexthop_find_by_id(net, entry[i].id);
2738 		if (!nexthop_get(nhe)) {
2739 			err = -ENOENT;
2740 			goto out_no_nh;
2741 		}
2742 
2743 		nhi = rtnl_dereference(nhe->nh_info);
2744 		if (nhi->family == AF_INET)
2745 			nhg->has_v4 = true;
2746 
2747 		nhg->nh_entries[i].stats =
2748 			netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
2749 		if (!nhg->nh_entries[i].stats) {
2750 			err = -ENOMEM;
2751 			nexthop_put(nhe);
2752 			goto out_no_nh;
2753 		}
2754 		nhg->nh_entries[i].nh = nhe;
2755 		nhg->nh_entries[i].weight = nexthop_grp_weight(&entry[i]);
2756 
2757 		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2758 		nhg->nh_entries[i].nh_parent = nh;
2759 	}
2760 
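	/* Both group types are created over existing nexthops, e.g. with
	 * iproute2 (illustrative commands, not part of this file):
	 *   ip nexthop add id 10 group 1/2 type resilient buckets 8
	 *   ip nexthop add id 11 group 1,2/2,3
	 * The second form is a hash-threshold group with weights 2 and 3.
	 */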
2761 	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2762 		nhg->hash_threshold = 1;
2763 		nhg->is_multipath = true;
2764 	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2765 		struct nh_res_table *res_table;
2766 
2767 		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2768 		if (!res_table) {
2769 			err = -ENOMEM;
2770 			goto out_no_nh;
2771 		}
2772 
2773 		rcu_assign_pointer(nhg->spare->res_table, res_table);
2774 		rcu_assign_pointer(nhg->res_table, res_table);
2775 		nhg->resilient = true;
2776 		nhg->is_multipath = true;
2777 	}
2778 
2779 	WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);
2780 
2781 	if (nhg->hash_threshold)
2782 		nh_hthr_group_rebalance(nhg);
2783 
2784 	if (cfg->nh_fdb)
2785 		nhg->fdb_nh = 1;
2786 
2787 	if (cfg->nh_hw_stats)
2788 		nhg->hw_stats = true;
2789 
2790 	rcu_assign_pointer(nh->nh_grp, nhg);
2791 
2792 	return nh;
2793 
2794 out_no_nh:
2795 	for (i--; i >= 0; --i) {
2796 		list_del(&nhg->nh_entries[i].nh_list);
2797 		free_percpu(nhg->nh_entries[i].stats);
2798 		nexthop_put(nhg->nh_entries[i].nh);
2799 	}
2800 
2801 	kfree(nhg->spare);
2802 	kfree(nhg);
2803 	kfree(nh);
2804 
2805 	return ERR_PTR(err);
2806 }
2807 
2808 static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2809 			  struct nh_info *nhi, struct nh_config *cfg,
2810 			  struct netlink_ext_ack *extack)
2811 {
2812 	struct fib_nh *fib_nh = &nhi->fib_nh;
2813 	struct fib_config fib_cfg = {
2814 		.fc_oif   = cfg->nh_ifindex,
2815 		.fc_gw4   = cfg->gw.ipv4,
2816 		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2817 		.fc_flags = cfg->nh_flags,
2818 		.fc_nlinfo = cfg->nlinfo,
2819 		.fc_encap = cfg->nh_encap,
2820 		.fc_encap_type = cfg->nh_encap_type,
2821 	};
2822 	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2823 	int err;
2824 
2825 	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2826 	if (err) {
2827 		fib_nh_release(net, fib_nh);
2828 		goto out;
2829 	}
2830 
2831 	if (nhi->fdb_nh)
2832 		goto out;
2833 
2834 	/* sets nh_dev if successful */
2835 	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2836 	if (!err) {
2837 		nh->nh_flags = fib_nh->fib_nh_flags;
2838 		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2839 					  !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
2840 	} else {
2841 		fib_nh_release(net, fib_nh);
2842 	}
2843 out:
2844 	return err;
2845 }
2846 
2847 static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
2848 			  struct nh_info *nhi, struct nh_config *cfg,
2849 			  struct netlink_ext_ack *extack)
2850 {
2851 	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2852 	struct fib6_config fib6_cfg = {
2853 		.fc_table = l3mdev_fib_table(cfg->dev),
2854 		.fc_ifindex = cfg->nh_ifindex,
2855 		.fc_gateway = cfg->gw.ipv6,
2856 		.fc_flags = cfg->nh_flags,
2857 		.fc_nlinfo = cfg->nlinfo,
2858 		.fc_encap = cfg->nh_encap,
2859 		.fc_encap_type = cfg->nh_encap_type,
2860 		.fc_is_fdb = cfg->nh_fdb,
2861 	};
2862 	int err;
2863 
2864 	if (!ipv6_addr_any(&cfg->gw.ipv6))
2865 		fib6_cfg.fc_flags |= RTF_GATEWAY;
2866 
2867 	/* sets nh_dev if successful */
2868 	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2869 				      extack);
2870 	if (err) {
2871 		/* IPv6 is not enabled; don't call fib6_nh_release */
2872 		if (err == -EAFNOSUPPORT)
2873 			goto out;
2874 		ipv6_stub->fib6_nh_release(fib6_nh);
2875 	} else {
2876 		nh->nh_flags = fib6_nh->fib_nh_flags;
2877 	}
2878 out:
2879 	return err;
2880 }
2881 
2882 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2883 				      struct netlink_ext_ack *extack)
2884 {
2885 	struct nh_info *nhi;
2886 	struct nexthop *nh;
2887 	int err = 0;
2888 
2889 	nh = nexthop_alloc();
2890 	if (!nh)
2891 		return ERR_PTR(-ENOMEM);
2892 
2893 	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2894 	if (!nhi) {
2895 		kfree(nh);
2896 		return ERR_PTR(-ENOMEM);
2897 	}
2898 
2899 	nh->nh_flags = cfg->nh_flags;
2900 	nh->net = net;
2901 
2902 	nhi->nh_parent = nh;
2903 	nhi->family = cfg->nh_family;
2904 	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2905 
2906 	if (cfg->nh_fdb)
2907 		nhi->fdb_nh = 1;
2908 
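	/* A blackhole nexthop has no real egress device; bind it to
	 * loopback so device-based bookkeeping still works.
	 * nexthop_flush_dev() in turn avoids flushing it when loopback
	 * merely goes down.
	 */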
2909 	if (cfg->nh_blackhole) {
2910 		nhi->reject_nh = 1;
2911 		cfg->nh_ifindex = net->loopback_dev->ifindex;
2912 	}
2913 
2914 	switch (cfg->nh_family) {
2915 	case AF_INET:
2916 		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2917 		break;
2918 	case AF_INET6:
2919 		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2920 		break;
2921 	}
2922 
2923 	if (err) {
2924 		kfree(nhi);
2925 		kfree(nh);
2926 		return ERR_PTR(err);
2927 	}
2928 
2929 	/* add the entry to the device based hash */
2930 	if (!nhi->fdb_nh)
2931 		nexthop_devhash_add(net, nhi);
2932 
2933 	rcu_assign_pointer(nh->nh_info, nhi);
2934 
2935 	return nh;
2936 }
2937 
2938 /* called with rtnl lock held */
2939 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2940 				   struct netlink_ext_ack *extack)
2941 {
2942 	struct nexthop *nh;
2943 	int err;
2944 
2945 	if (!cfg->nh_id) {
2946 		cfg->nh_id = nh_find_unused_id(net);
2947 		if (!cfg->nh_id) {
2948 			NL_SET_ERR_MSG(extack, "No unused id");
2949 			return ERR_PTR(-EINVAL);
2950 		}
2951 	}
2952 
2953 	if (cfg->nh_grp)
2954 		nh = nexthop_create_group(net, cfg);
2955 	else
2956 		nh = nexthop_create(net, cfg, extack);
2957 
2958 	if (IS_ERR(nh))
2959 		return nh;
2960 
2961 	refcount_set(&nh->refcnt, 1);
2962 	nh->id = cfg->nh_id;
2963 	nh->protocol = cfg->nh_protocol;
2964 	nh->net = net;
2965 
2966 	err = insert_nexthop(net, nh, cfg, extack);
2967 	if (err) {
2968 		__remove_nexthop(net, nh, NULL);
2969 		nexthop_put(nh);
2970 		nh = ERR_PTR(err);
2971 	}
2972 
2973 	return nh;
2974 }
2975 
2976 static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
2977 			    unsigned long *timer_p, bool *has_p,
2978 			    struct netlink_ext_ack *extack)
2979 {
2980 	unsigned long timer;
2981 	u32 value;
2982 
2983 	if (!attr) {
2984 		*timer_p = fallback;
2985 		*has_p = false;
2986 		return 0;
2987 	}
2988 
2989 	value = nla_get_u32(attr);
2990 	timer = clock_t_to_jiffies(value);
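	/* clock_t_to_jiffies() saturates to ~0UL on overflow. */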
2991 	if (timer == ~0UL) {
2992 		NL_SET_ERR_MSG(extack, "Timer value too large");
2993 		return -EINVAL;
2994 	}
2995 
2996 	*timer_p = timer;
2997 	*has_p = true;
2998 	return 0;
2999 }
3000 
3001 static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
3002 				    struct netlink_ext_ack *extack)
3003 {
3004 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
3005 	int err;
3006 
3007 	if (res) {
3008 		err = nla_parse_nested(tb,
3009 				       ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
3010 				       res, rtm_nh_res_policy_new, extack);
3011 		if (err < 0)
3012 			return err;
3013 	}
3014 
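	/* tb[] was zero-initialized above, so when no NHA_RES_GROUP
	 * attribute was supplied, all lookups below see NULL and the
	 * defaults apply.
	 */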
3015 	if (tb[NHA_RES_GROUP_BUCKETS]) {
3016 		cfg->nh_grp_res_num_buckets =
3017 			nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
3018 		cfg->nh_grp_res_has_num_buckets = true;
3019 		if (!cfg->nh_grp_res_num_buckets) {
3020 			NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
3021 			return -EINVAL;
3022 		}
3023 	}
3024 
3025 	err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
3026 			       NH_RES_DEFAULT_IDLE_TIMER,
3027 			       &cfg->nh_grp_res_idle_timer,
3028 			       &cfg->nh_grp_res_has_idle_timer,
3029 			       extack);
3030 	if (err)
3031 		return err;
3032 
3033 	return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
3034 				NH_RES_DEFAULT_UNBALANCED_TIMER,
3035 				&cfg->nh_grp_res_unbalanced_timer,
3036 				&cfg->nh_grp_res_has_unbalanced_timer,
3037 				extack);
3038 }
3039 
3040 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
3041 			    struct nlmsghdr *nlh, struct nlattr **tb,
3042 			    struct nh_config *cfg,
3043 			    struct netlink_ext_ack *extack)
3044 {
3045 	struct nhmsg *nhm = nlmsg_data(nlh);
3046 	int err;
3047 
3048 	err = -EINVAL;
3049 	if (nhm->resvd || nhm->nh_scope) {
3050 		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
3051 		goto out;
3052 	}
3053 	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
3054 		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
3055 		goto out;
3056 	}
3057 
3058 	switch (nhm->nh_family) {
3059 	case AF_INET:
3060 	case AF_INET6:
3061 		break;
3062 	case AF_UNSPEC:
3063 		if (tb[NHA_GROUP])
3064 			break;
3065 		fallthrough;
3066 	default:
3067 		NL_SET_ERR_MSG(extack, "Invalid address family");
3068 		goto out;
3069 	}
3070 
3071 	memset(cfg, 0, sizeof(*cfg));
3072 	cfg->nlflags = nlh->nlmsg_flags;
3073 	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
3074 	cfg->nlinfo.nlh = nlh;
3075 	cfg->nlinfo.nl_net = net;
3076 
3077 	cfg->nh_family = nhm->nh_family;
3078 	cfg->nh_protocol = nhm->nh_protocol;
3079 	cfg->nh_flags = nhm->nh_flags;
3080 
3081 	if (tb[NHA_ID])
3082 		cfg->nh_id = nla_get_u32(tb[NHA_ID]);
3083 
3084 	if (tb[NHA_FDB]) {
3085 		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
3086 		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
3087 			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
3088 			goto out;
3089 		}
3090 		if (nhm->nh_flags) {
3091 			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
3092 			goto out;
3093 		}
3094 		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
3095 	}
3096 
3097 	if (tb[NHA_GROUP]) {
3098 		if (nhm->nh_family != AF_UNSPEC) {
3099 			NL_SET_ERR_MSG(extack, "Invalid family for group");
3100 			goto out;
3101 		}
3102 		cfg->nh_grp = tb[NHA_GROUP];
3103 
3104 		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
3105 		if (tb[NHA_GROUP_TYPE])
3106 			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
3107 
3108 		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
3109 			NL_SET_ERR_MSG(extack, "Invalid group type");
3110 			goto out;
3111 		}
3112 
3113 		err = nh_check_attr_group(net, tb, ARRAY_SIZE(rtm_nh_policy_new),
3114 					  cfg->nh_grp_type, extack);
3115 		if (err)
3116 			goto out;
3117 
3118 		if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
3119 			err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
3120 						       cfg, extack);
3121 
3122 		if (tb[NHA_HW_STATS_ENABLE])
3123 			cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
3124 
3125 		/* no other attributes should be set */
3126 		goto out;
3127 	}
3128 
3129 	if (tb[NHA_BLACKHOLE]) {
3130 		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
3131 		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
3132 			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
3133 			goto out;
3134 		}
3135 
3136 		cfg->nh_blackhole = 1;
3137 		err = 0;
3138 		goto out;
3139 	}
3140 
3141 	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
3142 		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
3143 		goto out;
3144 	}
3145 
3146 	err = -EINVAL;
3147 	if (tb[NHA_GATEWAY]) {
3148 		struct nlattr *gwa = tb[NHA_GATEWAY];
3149 
3150 		switch (cfg->nh_family) {
3151 		case AF_INET:
3152 			if (nla_len(gwa) != sizeof(u32)) {
3153 				NL_SET_ERR_MSG(extack, "Invalid gateway");
3154 				goto out;
3155 			}
3156 			cfg->gw.ipv4 = nla_get_be32(gwa);
3157 			break;
3158 		case AF_INET6:
3159 			if (nla_len(gwa) != sizeof(struct in6_addr)) {
3160 				NL_SET_ERR_MSG(extack, "Invalid gateway");
3161 				goto out;
3162 			}
3163 			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
3164 			break;
3165 		default:
3166 			NL_SET_ERR_MSG(extack,
3167 				       "Unknown address family for gateway");
3168 			goto out;
3169 		}
3170 	} else {
3171 		/* device only nexthop (no gateway) */
3172 		if (cfg->nh_flags & RTNH_F_ONLINK) {
3173 			NL_SET_ERR_MSG(extack,
3174 				       "ONLINK flag can not be set for nexthop without a gateway");
3175 			goto out;
3176 		}
3177 	}
3178 
3179 	if (tb[NHA_ENCAP]) {
3180 		cfg->nh_encap = tb[NHA_ENCAP];
3181 
3182 		if (!tb[NHA_ENCAP_TYPE]) {
3183 			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
3184 			goto out;
3185 		}
3186 
3187 		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
3188 		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
3189 		if (err < 0)
3190 			goto out;
3191 
3192 	} else if (tb[NHA_ENCAP_TYPE]) {
3193 		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
3194 		goto out;
3195 	}
3196 
3197 	if (tb[NHA_HW_STATS_ENABLE]) {
3198 		NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
3199 		goto out;
3200 	}
3201 
3202 	err = 0;
3203 out:
3204 	return err;
3205 }
3206 
3207 static int rtm_to_nh_config_rtnl(struct net *net, struct nlattr **tb,
3208 				 struct nh_config *cfg,
3209 				 struct netlink_ext_ack *extack)
3210 {
3211 	if (tb[NHA_GROUP])
3212 		return nh_check_attr_group_rtnl(net, tb, extack);
3213 
3214 	if (tb[NHA_OIF]) {
3215 		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
3216 		if (cfg->nh_ifindex)
3217 			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
3218 
3219 		if (!cfg->dev) {
3220 			NL_SET_ERR_MSG(extack, "Invalid device index");
3221 			return -EINVAL;
3222 		}
3223 
3224 		if (!(cfg->dev->flags & IFF_UP)) {
3225 			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3226 			return -ENETDOWN;
3227 		}
3228 
3229 		if (!netif_carrier_ok(cfg->dev)) {
3230 			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
3231 			return -ENETDOWN;
3232 		}
3233 	}
3234 
3235 	return 0;
3236 }
3237 
3238 /* rtnl */
3239 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3240 			   struct netlink_ext_ack *extack)
3241 {
3242 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
3243 	struct net *net = sock_net(skb->sk);
3244 	struct nh_config cfg;
3245 	struct nexthop *nh;
3246 	int err;
3247 
3248 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3249 			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
3250 			  rtm_nh_policy_new, extack);
3251 	if (err < 0)
3252 		goto out;
3253 
3254 	err = rtm_to_nh_config(net, skb, nlh, tb, &cfg, extack);
3255 	if (err)
3256 		goto out;
3257 
3258 	if (cfg.nlflags & NLM_F_REPLACE && !cfg.nh_id) {
3259 		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
3260 		err = -EINVAL;
3261 		goto out;
3262 	}
3263 
3264 	rtnl_net_lock(net);
3265 
3266 	err = rtm_to_nh_config_rtnl(net, tb, &cfg, extack);
3267 	if (err)
3268 		goto unlock;
3269 
3270 	nh = nexthop_add(net, &cfg, extack);
3271 	if (IS_ERR(nh))
3272 		err = PTR_ERR(nh);
3273 
3274 unlock:
3275 	rtnl_net_unlock(net);
3276 out:
3277 	return err;
3278 }
3279 
3280 static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
3281 				struct nlattr **tb, u32 *id, u32 *op_flags,
3282 				struct netlink_ext_ack *extack)
3283 {
3284 	struct nhmsg *nhm = nlmsg_data(nlh);
3285 
3286 	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3287 		NL_SET_ERR_MSG(extack, "Invalid values in header");
3288 		return -EINVAL;
3289 	}
3290 
3291 	if (!tb[NHA_ID]) {
3292 		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
3293 		return -EINVAL;
3294 	}
3295 
3296 	*id = nla_get_u32(tb[NHA_ID]);
3297 	if (!(*id)) {
3298 		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3299 		return -EINVAL;
3300 	}
3301 
3302 	if (op_flags)
3303 		*op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0);
3304 
3305 	return 0;
3306 }
3307 
3308 /* rtnl */
3309 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3310 			   struct netlink_ext_ack *extack)
3311 {
3312 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
3313 	struct net *net = sock_net(skb->sk);
3314 	struct nl_info nlinfo = {
3315 		.nlh = nlh,
3316 		.nl_net = net,
3317 		.portid = NETLINK_CB(skb).portid,
3318 	};
3319 	struct nexthop *nh;
3320 	int err;
3321 	u32 id;
3322 
3323 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3324 			  ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
3325 			  extack);
3326 	if (err < 0)
3327 		return err;
3328 
3329 	err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
3330 	if (err)
3331 		return err;
3332 
3333 	rtnl_net_lock(net);
3334 
3335 	nh = nexthop_find_by_id(net, id);
3336 	if (nh)
3337 		remove_nexthop(net, nh, &nlinfo);
3338 	else
3339 		err = -ENOENT;
3340 
3341 	rtnl_net_unlock(net);
3342 
3343 	return err;
3344 }
3345 
3346 /* rtnl */
3347 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3348 			   struct netlink_ext_ack *extack)
3349 {
3350 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
3351 	struct net *net = sock_net(in_skb->sk);
3352 	struct sk_buff *skb = NULL;
3353 	struct nexthop *nh;
3354 	u32 op_flags;
3355 	int err;
3356 	u32 id;
3357 
3358 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3359 			  ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
3360 			  extack);
3361 	if (err < 0)
3362 		return err;
3363 
3364 	err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
3365 	if (err)
3366 		return err;
3367 
3368 	err = -ENOBUFS;
3369 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3370 	if (!skb)
3371 		goto out;
3372 
3373 	err = -ENOENT;
3374 	nh = nexthop_find_by_id(net, id);
3375 	if (!nh)
3376 		goto errout_free;
3377 
3378 	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
3379 			   nlh->nlmsg_seq, 0, op_flags);
3380 	if (err < 0) {
3381 		WARN_ON(err == -EMSGSIZE);
3382 		goto errout_free;
3383 	}
3384 
3385 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3386 out:
3387 	return err;
3388 errout_free:
3389 	kfree_skb(skb);
3390 	goto out;
3391 }
3392 
3393 struct nh_dump_filter {
3394 	u32 nh_id;
3395 	int dev_idx;
3396 	int master_idx;
3397 	bool group_filter;
3398 	bool fdb_filter;
3399 	u32 res_bucket_nh_id;
3400 	u32 op_flags;
3401 };
3402 
3403 static bool nh_dump_filtered(struct nexthop *nh,
3404 			     struct nh_dump_filter *filter, u8 family)
3405 {
3406 	const struct net_device *dev;
3407 	const struct nh_info *nhi;
3408 
3409 	if (filter->group_filter && !nh->is_group)
3410 		return true;
3411 
3412 	if (!filter->dev_idx && !filter->master_idx && !family)
3413 		return false;
3414 
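	/* The device, master and family filters only make sense for single
	 * nexthops; groups can never match them.
	 */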
3415 	if (nh->is_group)
3416 		return true;
3417 
3418 	nhi = rtnl_dereference(nh->nh_info);
3419 	if (family && nhi->family != family)
3420 		return true;
3421 
3422 	dev = nhi->fib_nhc.nhc_dev;
3423 	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
3424 		return true;
3425 
3426 	if (filter->master_idx) {
3427 		struct net_device *master;
3428 
3429 		if (!dev)
3430 			return true;
3431 
3432 		master = netdev_master_upper_dev_get((struct net_device *)dev);
3433 		if (!master || master->ifindex != filter->master_idx)
3434 			return true;
3435 	}
3436 
3437 	return false;
3438 }
3439 
3440 static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
3441 			       struct nh_dump_filter *filter,
3442 			       struct netlink_ext_ack *extack)
3443 {
3444 	struct nhmsg *nhm;
3445 	u32 idx;
3446 
3447 	if (tb[NHA_OIF]) {
3448 		idx = nla_get_u32(tb[NHA_OIF]);
3449 		if (idx > INT_MAX) {
3450 			NL_SET_ERR_MSG(extack, "Invalid device index");
3451 			return -EINVAL;
3452 		}
3453 		filter->dev_idx = idx;
3454 	}
3455 	if (tb[NHA_MASTER]) {
3456 		idx = nla_get_u32(tb[NHA_MASTER]);
3457 		if (idx > INT_MAX) {
3458 			NL_SET_ERR_MSG(extack, "Invalid master device index");
3459 			return -EINVAL;
3460 		}
3461 		filter->master_idx = idx;
3462 	}
3463 	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
3464 	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
3465 
3466 	nhm = nlmsg_data(nlh);
3467 	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3468 		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
3469 		return -EINVAL;
3470 	}
3471 
3472 	return 0;
3473 }
3474 
3475 static int nh_valid_dump_req(const struct nlmsghdr *nlh,
3476 			     struct nh_dump_filter *filter,
3477 			     struct netlink_callback *cb)
3478 {
3479 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
3480 	int err;
3481 
3482 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3483 			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
3484 			  rtm_nh_policy_dump, cb->extack);
3485 	if (err < 0)
3486 		return err;
3487 
3488 	filter->op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0);
3489 
3490 	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3491 }
3492 
3493 struct rtm_dump_nh_ctx {
3494 	u32 idx;
3495 };
3496 
3497 static struct rtm_dump_nh_ctx *
3498 rtm_dump_nh_ctx(struct netlink_callback *cb)
3499 {
3500 	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
3501 
3502 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3503 	return ctx;
3504 }
3505 
3506 static int rtm_dump_walk_nexthops(struct sk_buff *skb,
3507 				  struct netlink_callback *cb,
3508 				  struct rb_root *root,
3509 				  struct rtm_dump_nh_ctx *ctx,
3510 				  int (*nh_cb)(struct sk_buff *skb,
3511 					       struct netlink_callback *cb,
3512 					       struct nexthop *nh, void *data),
3513 				  void *data)
3514 {
3515 	struct rb_node *node;
3516 	int s_idx;
3517 	int err;
3518 
3519 	s_idx = ctx->idx;
3520 
3521 	/* If this is not the first invocation, ctx->idx will contain the id of
3522 	 * the last nexthop we processed. Instead of starting from the very
3523 	 * first element of the red/black tree again and linearly skipping the
3524 	 * (potentially large) set of nodes with an id smaller than s_idx, walk
3525 	 * the tree and find the left-most node whose id is >= s_idx.  This
3526 	 * provides an efficient O(log n) starting point for the dump
3527 	 * continuation.
3528 	 */
3529 	if (s_idx != 0) {
3530 		struct rb_node *tmp = root->rb_node;
3531 
3532 		node = NULL;
3533 		while (tmp) {
3534 			struct nexthop *nh;
3535 
3536 			nh = rb_entry(tmp, struct nexthop, rb_node);
3537 			if (nh->id < s_idx) {
3538 				tmp = tmp->rb_right;
3539 			} else {
3540 				/* Track current candidate and keep looking on
3541 				 * the left side to find the left-most
3542 				 * (smallest id) that is still >= s_idx.
3543 				 */
3544 				node = tmp;
3545 				tmp = tmp->rb_left;
3546 			}
3547 		}
3548 	} else {
3549 		node = rb_first(root);
3550 	}
3551 
3552 	for (; node; node = rb_next(node)) {
3553 		struct nexthop *nh;
3554 
3555 		nh = rb_entry(node, struct nexthop, rb_node);
3556 
3557 		ctx->idx = nh->id;
3558 		err = nh_cb(skb, cb, nh, data);
3559 		if (err)
3560 			return err;
3561 	}
3562 
3563 	return 0;
3564 }
3565 
3566 static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
3567 			       struct nexthop *nh, void *data)
3568 {
3569 	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3570 	struct nh_dump_filter *filter = data;
3571 
3572 	if (nh_dump_filtered(nh, filter, nhm->nh_family))
3573 		return 0;
3574 
3575 	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
3576 			    NETLINK_CB(cb->skb).portid,
3577 			    cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
3578 }
3579 
3580 /* rtnl */
3581 static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
3582 {
3583 	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
3584 	struct net *net = sock_net(skb->sk);
3585 	struct rb_root *root = &net->nexthop.rb_root;
3586 	struct nh_dump_filter filter = {};
3587 	int err;
3588 
3589 	err = nh_valid_dump_req(cb->nlh, &filter, cb);
3590 	if (err < 0)
3591 		return err;
3592 
3593 	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
3594 				     &rtm_dump_nexthop_cb, &filter);
3595 
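	/* Record the generation number so that netlink can flag the dump
	 * with NLM_F_DUMP_INTR if the nexthop set changed while it was
	 * being produced.
	 */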
3596 	cb->seq = net->nexthop.seq;
3597 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3598 	return err;
3599 }
3600 
3601 static struct nexthop *
3602 nexthop_find_group_resilient(struct net *net, u32 id,
3603 			     struct netlink_ext_ack *extack)
3604 {
3605 	struct nh_group *nhg;
3606 	struct nexthop *nh;
3607 
3608 	nh = nexthop_find_by_id(net, id);
3609 	if (!nh)
3610 		return ERR_PTR(-ENOENT);
3611 
3612 	if (!nh->is_group) {
3613 		NL_SET_ERR_MSG(extack, "Not a nexthop group");
3614 		return ERR_PTR(-EINVAL);
3615 	}
3616 
3617 	nhg = rtnl_dereference(nh->nh_grp);
3618 	if (!nhg->resilient) {
3619 		NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
3620 		return ERR_PTR(-EINVAL);
3621 	}
3622 
3623 	return nh;
3624 }
3625 
3626 static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
3627 			      struct netlink_ext_ack *extack)
3628 {
3629 	u32 idx;
3630 
3631 	if (attr) {
3632 		idx = nla_get_u32(attr);
3633 		if (!idx) {
3634 			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3635 			return -EINVAL;
3636 		}
3637 		*nh_id_p = idx;
3638 	} else {
3639 		*nh_id_p = 0;
3640 	}
3641 
3642 	return 0;
3643 }
3644 
3645 static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
3646 				    struct nh_dump_filter *filter,
3647 				    struct netlink_callback *cb)
3648 {
3649 	struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
3650 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
3651 	int err;
3652 
3653 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3654 			  ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
3655 			  rtm_nh_policy_dump_bucket, NULL);
3656 	if (err < 0)
3657 		return err;
3658 
3659 	err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
3660 	if (err)
3661 		return err;
3662 
3663 	if (tb[NHA_RES_BUCKET]) {
3664 		size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;
3665 
3666 		err = nla_parse_nested(res_tb, max,
3667 				       tb[NHA_RES_BUCKET],
3668 				       rtm_nh_res_bucket_policy_dump,
3669 				       cb->extack);
3670 		if (err < 0)
3671 			return err;
3672 
3673 		err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
3674 					 &filter->res_bucket_nh_id,
3675 					 cb->extack);
3676 		if (err)
3677 			return err;
3678 	}
3679 
3680 	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3681 }
3682 
3683 struct rtm_dump_res_bucket_ctx {
3684 	struct rtm_dump_nh_ctx nh;
3685 	u16 bucket_index;
3686 };
3687 
3688 static struct rtm_dump_res_bucket_ctx *
3689 rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
3690 {
3691 	struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
3692 
3693 	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3694 	return ctx;
3695 }
3696 
3697 struct rtm_dump_nexthop_bucket_data {
3698 	struct rtm_dump_res_bucket_ctx *ctx;
3699 	struct nh_dump_filter filter;
3700 };
3701 
3702 static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
3703 				      struct netlink_callback *cb,
3704 				      struct nexthop *nh,
3705 				      struct rtm_dump_nexthop_bucket_data *dd)
3706 {
3707 	u32 portid = NETLINK_CB(cb->skb).portid;
3708 	struct nhmsg *nhm = nlmsg_data(cb->nlh);
3709 	struct nh_res_table *res_table;
3710 	struct nh_group *nhg;
3711 	u16 bucket_index;
3712 	int err;
3713 
3714 	nhg = rtnl_dereference(nh->nh_grp);
3715 	res_table = rtnl_dereference(nhg->res_table);
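	/* Resume from the bucket recorded by the previous (partial) dump
	 * invocation; the index is reset below once the whole group has
	 * been dumped.
	 */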
3716 	for (bucket_index = dd->ctx->bucket_index;
3717 	     bucket_index < res_table->num_nh_buckets;
3718 	     bucket_index++) {
3719 		struct nh_res_bucket *bucket;
3720 		struct nh_grp_entry *nhge;
3721 
3722 		bucket = &res_table->nh_buckets[bucket_index];
3723 		nhge = rtnl_dereference(bucket->nh_entry);
3724 		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
3725 			continue;
3726 
3727 		if (dd->filter.res_bucket_nh_id &&
3728 		    dd->filter.res_bucket_nh_id != nhge->nh->id)
3729 			continue;
3730 
3731 		dd->ctx->bucket_index = bucket_index;
3732 		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
3733 					 RTM_NEWNEXTHOPBUCKET, portid,
3734 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3735 					 cb->extack);
3736 		if (err)
3737 			return err;
3738 	}
3739 
3740 	dd->ctx->bucket_index = 0;
3741 
3742 	return 0;
3743 }
3744 
3745 static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
3746 				      struct netlink_callback *cb,
3747 				      struct nexthop *nh, void *data)
3748 {
3749 	struct rtm_dump_nexthop_bucket_data *dd = data;
3750 	struct nh_group *nhg;
3751 
3752 	if (!nh->is_group)
3753 		return 0;
3754 
3755 	nhg = rtnl_dereference(nh->nh_grp);
3756 	if (!nhg->resilient)
3757 		return 0;
3758 
3759 	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
3760 }
3761 
3762 /* rtnl */
3763 static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
3764 				   struct netlink_callback *cb)
3765 {
3766 	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
3767 	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
3768 	struct net *net = sock_net(skb->sk);
3769 	struct nexthop *nh;
3770 	int err;
3771 
3772 	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
3773 	if (err)
3774 		return err;
3775 
3776 	if (dd.filter.nh_id) {
3777 		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
3778 						  cb->extack);
3779 		if (IS_ERR(nh))
3780 			return PTR_ERR(nh);
3781 		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
3782 	} else {
3783 		struct rb_root *root = &net->nexthop.rb_root;
3784 
3785 		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
3786 					     &rtm_dump_nexthop_bucket_cb, &dd);
3787 	}
3788 
3789 	cb->seq = net->nexthop.seq;
3790 	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3791 	return err;
3792 }
3793 
3794 static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
3795 					      u16 *bucket_index,
3796 					      struct netlink_ext_ack *extack)
3797 {
3798 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
3799 	int err;
3800 
3801 	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
3802 			       res, rtm_nh_res_bucket_policy_get, extack);
3803 	if (err < 0)
3804 		return err;
3805 
3806 	if (!tb[NHA_RES_BUCKET_INDEX]) {
3807 		NL_SET_ERR_MSG(extack, "Bucket index is missing");
3808 		return -EINVAL;
3809 	}
3810 
3811 	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
3812 	return 0;
3813 }
3814 
3815 static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
3816 				   u32 *id, u16 *bucket_index,
3817 				   struct netlink_ext_ack *extack)
3818 {
3819 	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
3820 	int err;
3821 
3822 	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3823 			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
3824 			  rtm_nh_policy_get_bucket, extack);
3825 	if (err < 0)
3826 		return err;
3827 
3828 	err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
3829 	if (err)
3830 		return err;
3831 
3832 	if (!tb[NHA_RES_BUCKET]) {
3833 		NL_SET_ERR_MSG(extack, "Bucket information is missing");
3834 		return -EINVAL;
3835 	}
3836 
3837 	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
3838 						 bucket_index, extack);
3839 	if (err)
3840 		return err;
3841 
3842 	return 0;
3843 }
3844 
3845 /* rtnl */
3846 static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3847 				  struct netlink_ext_ack *extack)
3848 {
3849 	struct net *net = sock_net(in_skb->sk);
3850 	struct nh_res_table *res_table;
3851 	struct sk_buff *skb = NULL;
3852 	struct nh_group *nhg;
3853 	struct nexthop *nh;
3854 	u16 bucket_index;
3855 	int err;
3856 	u32 id;
3857 
3858 	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
3859 	if (err)
3860 		return err;
3861 
3862 	nh = nexthop_find_group_resilient(net, id, extack);
3863 	if (IS_ERR(nh))
3864 		return PTR_ERR(nh);
3865 
3866 	nhg = rtnl_dereference(nh->nh_grp);
3867 	res_table = rtnl_dereference(nhg->res_table);
3868 	if (bucket_index >= res_table->num_nh_buckets) {
3869 		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
3870 		return -ENOENT;
3871 	}
3872 
3873 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3874 	if (!skb)
3875 		return -ENOBUFS;
3876 
3877 	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
3878 				 bucket_index, RTM_NEWNEXTHOPBUCKET,
3879 				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
3880 				 0, extack);
3881 	if (err < 0) {
3882 		WARN_ON(err == -EMSGSIZE);
3883 		goto errout_free;
3884 	}
3885 
3886 	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3887 
3888 errout_free:
3889 	kfree_skb(skb);
3890 	return err;
3891 }
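/* For illustration, a userspace sketch of the request above using
 * libmnl (assumed here; buf, group_id and bucket_index are placeholder
 * variables, and socket setup plus the reply path are omitted):
 *
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct nlattr *nest;
 *	struct nhmsg *nhm;
 *
 *	nlh->nlmsg_type = RTM_GETNEXTHOPBUCKET;
 *	nlh->nlmsg_flags = NLM_F_REQUEST;
 *	nhm = mnl_nlmsg_put_extra_header(nlh, sizeof(*nhm));
 *	nhm->nh_family = AF_UNSPEC;
 *	mnl_attr_put_u32(nlh, NHA_ID, group_id);
 *	nest = mnl_attr_nest_start(nlh, NHA_RES_BUCKET);
 *	mnl_attr_put_u16(nlh, NHA_RES_BUCKET_INDEX, bucket_index);
 *	mnl_attr_nest_end(nlh, nest);
 *
 * or, with iproute2:  ip nexthop bucket get id 10 index 0
 */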
3892 
3893 static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
3894 {
3895 	unsigned int hash = nh_dev_hashfn(dev->ifindex);
3896 	struct net *net = dev_net(dev);
3897 	struct hlist_head *head = &net->nexthop.devhash[hash];
3898 	struct hlist_node *n;
3899 	struct nh_info *nhi;
3900 
3901 	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
3902 		if (nhi->fib_nhc.nhc_dev != dev ||
3903 		    nhi->family != AF_INET)
3904 			continue;
3905 
3906 		fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu, orig_mtu);
3907 	}
3908 }
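/* Only IPv4 nexthops are updated here: fib_nhc_update_mtu() refreshes
 * the cached routing exceptions hanging off the nexthop so stale PMTU
 * state does not survive the device MTU change.  IPv6 reacts to
 * NETDEV_CHANGEMTU through its own notifier path.
 */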
3909 
3910 /* rtnl */
3911 static int nh_netdev_event(struct notifier_block *this,
3912 			   unsigned long event, void *ptr)
3913 {
3914 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3915 	struct netdev_notifier_info_ext *info_ext;
3916 
3917 	switch (event) {
3918 	case NETDEV_DOWN:
3919 	case NETDEV_UNREGISTER:
3920 		nexthop_flush_dev(dev, event);
3921 		break;
3922 	case NETDEV_CHANGE:
3923 		if (!(netif_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
3924 			nexthop_flush_dev(dev, event);
3925 		break;
3926 	case NETDEV_CHANGEMTU:
3927 		info_ext = ptr;
3928 		nexthop_sync_mtu(dev, info_ext->ext.mtu);
3929 		rt_cache_flush(dev_net(dev));
3930 		break;
3931 	}
3932 	return NOTIFY_DONE;
3933 }
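/* Event handling in short:
 * - NETDEV_DOWN/NETDEV_UNREGISTER: remove all nexthops using the
 *   device (groups shrink or disappear with them).
 * - NETDEV_CHANGE: same, but only when the link is no longer
 *   RUNNING/LOWER_UP; flag changes that keep the carrier up are
 *   ignored.
 * - NETDEV_CHANGEMTU: nexthops stay, but cached MTU state is resynced
 *   and the IPv4 route cache flushed, since cached dsts embed the old
 *   MTU.
 */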
3934 
3935 static struct notifier_block nh_netdev_notifier = {
3936 	.notifier_call = nh_netdev_event,
3937 };
3938 
3939 static int nexthops_dump(struct net *net, struct notifier_block *nb,
3940 			 enum nexthop_event_type event_type,
3941 			 struct netlink_ext_ack *extack)
3942 {
3943 	struct rb_root *root = &net->nexthop.rb_root;
3944 	struct rb_node *node;
3945 	int err = 0;
3946 
3947 	for (node = rb_first(root); node; node = rb_next(node)) {
3948 		struct nexthop *nh;
3949 
3950 		nh = rb_entry(node, struct nexthop, rb_node);
3951 		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
3952 		if (err)
3953 			break;
3954 	}
3955 
3956 	return err;
3957 }
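/* Replays the whole nexthop table to a single notifier.  A driver that
 * registers after nexthops already exist sees them as REPLACE events;
 * one that unregisters while they still exist sees DEL events.  Callers
 * hold RTNL, so the replay cannot race with configuration changes.
 */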
3958 
3959 int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
3960 			      struct netlink_ext_ack *extack)
3961 {
3962 	int err;
3963 
3964 	rtnl_lock();
3965 	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
3966 	if (err)
3967 		goto unlock;
3968 	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
3969 					       nb);
3970 unlock:
3971 	rtnl_unlock();
3972 	return err;
3973 }
3974 EXPORT_SYMBOL(register_nexthop_notifier);
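/* Sketch of driver-side usage (hypothetical driver code;
 * program_hw_nexthop() is a placeholder).  The notifier payload is a
 * struct nh_notifier_info whose type field selects the union member to
 * read:
 *
 *	static int my_nh_event(struct notifier_block *nb,
 *			       unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		if (event == NEXTHOP_EVENT_REPLACE &&
 *		    info->type == NH_NOTIFIER_INFO_TYPE_SINGLE) {
 *			int err = program_hw_nexthop(info->id, info->nh);
 *
 *			return notifier_from_errno(err);
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_nh_event,
 *	};
 *
 *	err = register_nexthop_notifier(net, &my_nb, extack);
 */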
3975 
3976 int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3977 {
3978 	int err;
3979 
3980 	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
3981 						 nb);
3982 	if (!err)
3983 		nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
3984 	return err;
3985 }
3986 EXPORT_SYMBOL(__unregister_nexthop_notifier);
3987 
3988 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3989 {
3990 	int err;
3991 
3992 	rtnl_lock();
3993 	err = __unregister_nexthop_notifier(net, nb);
3994 	rtnl_unlock();
3995 	return err;
3996 }
3997 EXPORT_SYMBOL(unregister_nexthop_notifier);
3998 
3999 void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
4000 {
4001 	struct nexthop *nexthop;
4002 
4003 	rcu_read_lock();
4004 
4005 	nexthop = nexthop_find_by_id(net, id);
4006 	if (!nexthop)
4007 		goto out;
4008 
4009 	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
4010 	if (offload)
4011 		nexthop->nh_flags |= RTNH_F_OFFLOAD;
4012 	if (trap)
4013 		nexthop->nh_flags |= RTNH_F_TRAP;
4014 
4015 out:
4016 	rcu_read_unlock();
4017 }
4018 EXPORT_SYMBOL(nexthop_set_hw_flags);
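/* Driver feedback: after trying to program a nexthop, the driver
 * reports the outcome here so dumps show "offload" or "trap" on the
 * entry.  Only these two feedback flags are rewritten; other nh_flags
 * are untouched.
 */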
4019 
4020 void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
4021 				 bool offload, bool trap)
4022 {
4023 	struct nh_res_table *res_table;
4024 	struct nh_res_bucket *bucket;
4025 	struct nexthop *nexthop;
4026 	struct nh_group *nhg;
4027 
4028 	rcu_read_lock();
4029 
4030 	nexthop = nexthop_find_by_id(net, id);
4031 	if (!nexthop || !nexthop->is_group)
4032 		goto out;
4033 
4034 	nhg = rcu_dereference(nexthop->nh_grp);
4035 	if (!nhg->resilient)
4036 		goto out;
4037 
4038 	res_table = rcu_dereference(nhg->res_table);
4039 	if (bucket_index >= res_table->num_nh_buckets)
4040 		goto out;
4041 
4042 	bucket = &res_table->nh_buckets[bucket_index];
4043 	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
4044 	if (offload)
4045 		bucket->nh_flags |= RTNH_F_OFFLOAD;
4046 	if (trap)
4047 		bucket->nh_flags |= RTNH_F_TRAP;
4048 
4049 out:
4050 	rcu_read_unlock();
4051 }
4052 EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
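/* Same feedback mechanism at bucket granularity for resilient groups;
 * the resulting flags are visible in RTM_GETNEXTHOPBUCKET replies.
 */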
4053 
4054 void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
4055 				     unsigned long *activity)
4056 {
4057 	struct nh_res_table *res_table;
4058 	struct nexthop *nexthop;
4059 	struct nh_group *nhg;
4060 	u16 i;
4061 
4062 	rcu_read_lock();
4063 
4064 	nexthop = nexthop_find_by_id(net, id);
4065 	if (!nexthop || !nexthop->is_group)
4066 		goto out;
4067 
4068 	nhg = rcu_dereference(nexthop->nh_grp);
4069 	if (!nhg->resilient)
4070 		goto out;
4071 
4072 	/* Instead of silently ignoring some buckets, demand that the sizes
4073 	 * be the same.
4074 	 */
4075 	res_table = rcu_dereference(nhg->res_table);
4076 	if (num_buckets != res_table->num_nh_buckets)
4077 		goto out;
4078 
4079 	for (i = 0; i < num_buckets; i++) {
4080 		if (test_bit(i, activity))
4081 			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
4082 	}
4083 
4084 out:
4085 	rcu_read_unlock();
4086 }
4087 EXPORT_SYMBOL(nexthop_res_grp_activity_update);
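/* Sketch of the expected driver call (hypothetical driver code;
 * group_id and num_buckets are placeholders): poll hardware activity
 * periodically and report one bit per bucket, marking those buckets
 * busy so the idle timer will not migrate them:
 *
 *	unsigned long *activity;
 *
 *	activity = bitmap_zalloc(num_buckets, GFP_KERNEL);
 *	if (!activity)
 *		return;
 *	... set_bit(i, activity) for each bucket with recent traffic ...
 *	nexthop_res_grp_activity_update(net, group_id, num_buckets,
 *					activity);
 *	bitmap_free(activity);
 */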
4088 
4089 static void __net_exit nexthop_net_exit_rtnl(struct net *net,
4090 					     struct list_head *dev_to_kill)
4091 {
4092 	ASSERT_RTNL_NET(net);
4093 	flush_all_nexthops(net);
4094 }
4095 
4096 static void __net_exit nexthop_net_exit(struct net *net)
4097 {
4098 	kfree(net->nexthop.devhash);
4099 	net->nexthop.devhash = NULL;
4100 }
4101 
4102 static int __net_init nexthop_net_init(struct net *net)
4103 {
4104 	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;
4105 
4106 	net->nexthop.rb_root = RB_ROOT;
4107 	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
4108 	if (!net->nexthop.devhash)
4109 		return -ENOMEM;
4110 	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
4111 
4112 	return 0;
4113 }
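/* Per-netns state set up here: rb_root indexes all nexthops by ID,
 * devhash maps a device to the nexthops using it (for the flush and
 * MTU-sync paths above), and the blocking notifier chain carries
 * NEXTHOP_EVENT_* notifications to drivers.
 */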
4114 
4115 static struct pernet_operations nexthop_net_ops = {
4116 	.init = nexthop_net_init,
4117 	.exit = nexthop_net_exit,
4118 	.exit_rtnl = nexthop_net_exit_rtnl,
4119 };
4120 
4121 static const struct rtnl_msg_handler nexthop_rtnl_msg_handlers[] __initconst = {
4122 	{.msgtype = RTM_NEWNEXTHOP, .doit = rtm_new_nexthop,
4123 	 .flags = RTNL_FLAG_DOIT_PERNET},
4124 	{.msgtype = RTM_DELNEXTHOP, .doit = rtm_del_nexthop,
4125 	 .flags = RTNL_FLAG_DOIT_PERNET},
4126 	{.msgtype = RTM_GETNEXTHOP, .doit = rtm_get_nexthop,
4127 	 .dumpit = rtm_dump_nexthop},
4128 	{.msgtype = RTM_GETNEXTHOPBUCKET, .doit = rtm_get_nexthop_bucket,
4129 	 .dumpit = rtm_dump_nexthop_bucket},
4130 	{.protocol = PF_INET, .msgtype = RTM_NEWNEXTHOP,
4131 	 .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET},
4132 	{.protocol = PF_INET, .msgtype = RTM_GETNEXTHOP,
4133 	 .dumpit = rtm_dump_nexthop},
4134 	{.protocol = PF_INET6, .msgtype = RTM_NEWNEXTHOP,
4135 	 .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET},
4136 	{.protocol = PF_INET6, .msgtype = RTM_GETNEXTHOP,
4137 	 .dumpit = rtm_dump_nexthop},
4138 };
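/* The first four entries register under PF_UNSPEC; the PF_INET and
 * PF_INET6 entries repeat the new/dump handlers so requests carrying an
 * explicit address family reach the same code.  RTNL_FLAG_DOIT_PERNET
 * runs the doit under the per-netns RTNL instance.
 */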
4139 
4140 static int __init nexthop_init(void)
4141 {
4142 	register_pernet_subsys(&nexthop_net_ops);
4143 
4144 	register_netdevice_notifier(&nh_netdev_notifier);
4145 
4146 	rtnl_register_many(nexthop_rtnl_msg_handlers);
4147 
4148 	return 0;
4149 }
4150 subsys_initcall(nexthop_init);
4151