// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS | \
			       NHA_OP_FLAG_DUMP_HW_STATS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_policy_del[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};
static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->id = nhi->nh_parent->id;
	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc_obj(*info->nh);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}

static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc_flex(*info->nh_grp, nh_entries, num_nh);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;
	info->nh_grp->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;
	info->nh_res_table->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}

static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}
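
/* Editorial sketch (not part of the original file): a listener receiving a
 * non-forced bucket replacement might use the reported idle timer to choose
 * between atomic and unconditional replacement, roughly as follows, where
 * "my_poll_interval_ms" is a hypothetical driver parameter:
 *
 *	if (!info->nh_res_bucket->force &&
 *	    info->nh_res_bucket->idle_timer_ms > my_poll_interval_ms)
 *		... replace atomically, consulting HW activity ...
 *	else
 *		... replace unconditionally ...
 */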

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc_obj(*info->nh_res_bucket);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))
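
/* Editorial sketch of the writer sequence described above, using helpers
 * defined later in this file (the surrounding shape is illustrative only):
 *
 *	cancel_delayed_work_sync(&res_table->upkeep_dw);
 *	... modify res_table / buckets under RTNL ...
 *	if (!deleting)
 *		nh_res_table_upkeep(res_table, true, true);
 */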

static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nh->id,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}
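
/* Worked example (editorial): with NH_DEV_HASHBITS == 8, an ifindex of
 * 0x12345 folds as 0x45 ^ 0x23 ^ 0x01 = 0x67, i.e. successive 8-bit
 * chunks of the value are XORed together into the 8-bit table index.
 */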

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		free_percpu(nhge->stats);
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc_obj(struct nexthop);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
		spin_lock_init(&nh->lock);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc_flex(*nhg, nh_entries, num_nh);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}
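
/* Editorial note: the search above starts just past the most recently
 * allocated ID and relies on unsigned wrap-around of ->last_id_allocated;
 * it only gives up and returns 0 (not a valid nexthop ID) once the counter
 * wraps all the way back to its starting point with every candidate in use.
 */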

static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *cpu_stats;

	cpu_stats = get_cpu_ptr(nhge->stats);
	u64_stats_update_begin(&cpu_stats->syncp);
	u64_stats_inc(&cpu_stats->packets);
	u64_stats_update_end(&cpu_stats->syncp);
	put_cpu_ptr(cpu_stats);
}

static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
				    u64 *ret_packets)
{
	int i;

	*ret_packets = 0;

	for_each_possible_cpu(i) {
		struct nh_grp_entry_stats *cpu_stats;
		unsigned int start;
		u64 packets;

		cpu_stats = per_cpu_ptr(nhge->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			packets = u64_stats_read(&cpu_stats->packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		*ret_packets += packets;
	}
}
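
/* Editorial note: the fetch_begin/fetch_retry pair above re-reads each
 * per-CPU counter until it observes a consistent snapshot; on 32-bit
 * kernels a 64-bit counter update is not atomic, so a concurrent
 * nh_grp_entry_stats_inc() could otherwise tear the read.
 */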

static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
					 const struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	info->id = nh->id;
	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
	info->nh_grp_hw_stats = kzalloc_flex(*info->nh_grp_hw_stats, stats,
					     nhg->num_nh);
	if (!info->nh_grp_hw_stats)
		return -ENOMEM;

	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
	}

	return 0;
}

static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_grp_hw_stats);
}

void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
				  unsigned int nh_idx,
				  u64 delta_packets)
{
	info->hw_stats_used = true;
	info->stats[nh_idx].packets += delta_packets;
}
EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);

static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
					 struct nh_notifier_info *info)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
	}
}

static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
{
	struct nh_notifier_info info = {
		.net = nh->net,
	};
	struct net *net = nh->net;
	int err;

	if (nexthop_notifiers_is_empty(net)) {
		*hw_stats_used = false;
		return 0;
	}

	err = nh_notifier_grp_hw_stats_init(&info, nh);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
					   &info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	nh_grp_hw_stats_apply_update(nh, &info);
	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;

	nh_notifier_grp_hw_stats_fini(&info);
	return notifier_to_errno(err);
}

static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
					struct nh_grp_entry *nhge,
					u32 op_flags)
{
	struct nlattr *nest;
	u64 packets;

	nh_grp_entry_stats_read(nhge, &packets);

	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
			 packets + nhge->packets_hw))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
			 nhge->packets_hw))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
				  u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nlattr *nest;
	bool hw_stats_used;
	int err;
	int i;

	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
		goto err_out;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nhg->hw_stats) {
		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
		if (err)
			goto out;

		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
			goto err_out;
	}

	nest = nla_nest_start(skb, NHA_GROUP_STATS);
	if (!nest)
		goto err_out;

	for (i = 0; i < nhg->num_nh; i++)
		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
						 op_flags))
			goto cancel_out;

	nla_nest_end(skb, nest);
	return 0;

cancel_out:
	nla_nest_cancel(skb, nest);
err_out:
	err = -EMSGSIZE;
out:
	return err;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
			    u32 op_flags, u32 *resp_op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	u16 weight;
	int i;

	*resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		weight = nhg->nh_entries[i].weight - 1;

		*p++ = (struct nexthop_grp) {
			.id = nhg->nh_entries[i].nh->id,
			.weight = weight,
			.weight_high = weight >> 8,
		};
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
	     nla_put_nh_group_stats(skb, nh, op_flags)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags,
			u32 op_flags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
		u32 resp_op_flags = 0;

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
		    nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh) +
		      nla_total_size(4) +	/* NHA_OP_FLAGS */
		      0;
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}

static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	struct nexthop_grp *nhg;
	unsigned int i, j;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved field in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nexthop_grp_weight(&nhg[i]) == 0) {
			/* 0xffff got passed in, representing weight of 0x10000,
			 * which is too heavy.
			 */
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		switch (i) {
		case NHA_HW_STATS_ENABLE:
		case NHA_FDB:
			continue;
		case NHA_RES_GROUP:
			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
				continue;
			break;
		}
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group_rtnl(struct net *net, struct nlattr *tb[],
				    struct netlink_ext_ack *extack)
{
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int len;
	unsigned int i;
	u8 nhg_fdb;

	len = nla_len(tb[NHA_GROUP]) / sizeof(*nhg);
	nhg = nla_data(tb[NHA_GROUP]);
	nhg_fdb = !!tb[NHA_FDB];

	for (i = 0; i < len; i++) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}

	return 0;
}

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}

static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nh_grp_entry *nhge0 = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* Nexthop groups always check whether the nexthop is
		 * good and do not rely on a sysctl for this behavior.
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!nhge0)
			nhge0 = nhge;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	if (!nhge0)
		nhge0 = &nhg->nh_entries[0];
	nh_grp_entry_stats_inc(nhge0);
	return nhge0->nh;
}
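
/* Worked example (editorial, assuming the usual hash-threshold rebalance
 * where per-entry upper bounds partition the positive hash range in
 * proportion to weight): with two entries of weights 1 and 3, the first
 * entry's upper bound sits at roughly 1/4 of the range and the second's
 * at its end, so a flow hash at or below the first bound selects the
 * first nexthop and anything above it falls through to the second.
 */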

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	nh_grp_entry_stats_inc(nhge);
	return nhge->nh;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);
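
/* Illustrative caller sketch (editorial addition, not part of this file):
 * path selection runs under RCU with a precomputed positive flow hash.
 * The helper below is hypothetical; real callers include the fib
 * multipath selection paths for IPv4 and IPv6.
 */
#if 0	/* example only */
static struct nexthop *example_pick_path(struct nexthop *nh,
					 struct flow_keys *keys)
{
	/* Keep the hash non-negative, matching the int 'hash' argument. */
	int hash = flow_hash_from_keys(keys) >> 1;

	/* Caller must hold rcu_read_lock(). */
	return nexthop_select_path(nh, hash);
}
#endif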

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}

static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}

static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}

static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * the idle timer or the unbalanced timer expired.
	 */
1741
1742 idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
1743 if (time_after_eq(now, idle_point)) {
1744 /* The bucket is idle. We _can_ migrate: unset force. */
1745 *force = false;
1746 return true;
1747 }
1748
1749 /* Unbalanced timer of 0 means "never force". */
1750 if (res_table->unbalanced_timer) {
1751 unsigned long unb_point;
1752
1753 unb_point = nh_res_table_unb_point(res_table);
1754 if (time_after(now, unb_point)) {
1755 /* The bucket is not idle, but the unbalanced timer
1756 * expired. We _can_ migrate, but set force anyway,
1757 * so that drivers know to ignore activity reports
1758 * from the HW.
1759 */
1760 *force = true;
1761 return true;
1762 }
1763
1764 nh_res_time_set_deadline(unb_point, deadline);
1765 }
1766
1767 nh_res_time_set_deadline(idle_point, deadline);
1768 return false;
1769 }
1770
nh_res_bucket_migrate(struct nh_res_table * res_table,u16 bucket_index,bool notify,bool notify_nl,bool force)1771 static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
1772 u16 bucket_index, bool notify,
1773 bool notify_nl, bool force)
1774 {
1775 struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
1776 struct nh_grp_entry *new_nhge;
1777 struct netlink_ext_ack extack;
1778 int err;
1779
1780 new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
1781 struct nh_grp_entry,
1782 res.uw_nh_entry);
1783 if (WARN_ON_ONCE(!new_nhge))
1784 /* If this function is called, "bucket" is either not
1785 * occupied, or it belongs to a next hop that is
1786 * overweight. In either case, there ought to be a
1787 * corresponding underweight next hop.
1788 */
1789 return false;
1790
1791 if (notify) {
1792 struct nh_grp_entry *old_nhge;
1793
1794 old_nhge = nh_res_dereference(bucket->nh_entry);
1795 err = call_nexthop_res_bucket_notifiers(res_table->net,
1796 res_table->nhg_id,
1797 bucket_index, force,
1798 old_nhge->nh,
1799 new_nhge->nh, &extack);
1800 if (err) {
1801 pr_err_ratelimited("%s\n", extack._msg);
1802 if (!force)
1803 return false;
1804 /* It is not possible to veto a forced replacement, so
1805 * just clear the hardware flags from the nexthop
1806 * bucket to indicate to user space that this bucket is
1807 * not correctly populated in hardware.
1808 */
1809 bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
1810 }
1811 }
1812
1813 nh_res_bucket_set_nh(bucket, new_nhge);
1814 nh_res_bucket_set_idle(res_table, bucket);
1815
1816 if (notify_nl)
1817 nexthop_bucket_notify(res_table, bucket_index);
1818
1819 if (nh_res_nhge_is_balanced(new_nhge))
1820 list_del(&new_nhge->res.uw_nh_entry);
1821 return true;
1822 }
1823
1824 #define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)
1825
nh_res_table_upkeep(struct nh_res_table * res_table,bool notify,bool notify_nl)1826 static void nh_res_table_upkeep(struct nh_res_table *res_table,
1827 bool notify, bool notify_nl)
1828 {
1829 unsigned long now = jiffies;
1830 unsigned long deadline;
1831 u16 i;
1832
1833 /* Deadline is the next time that upkeep should be run. It is the
1834 * earliest time at which one of the buckets might be migrated.
1835 * Start at the most pessimistic estimate: either unbalanced_timer
1836 * from now, or if there is none, idle_timer from now. For each
1837 * encountered time point, call nh_res_time_set_deadline() to
1838 * refine the estimate.
1839 */
1840 if (res_table->unbalanced_timer)
1841 deadline = now + res_table->unbalanced_timer;
1842 else
1843 deadline = now + res_table->idle_timer;
1844
1845 for (i = 0; i < res_table->num_nh_buckets; i++) {
1846 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1847 bool force;
1848
1849 if (nh_res_bucket_should_migrate(res_table, bucket,
1850 &deadline, &force)) {
1851 if (!nh_res_bucket_migrate(res_table, i, notify,
1852 notify_nl, force)) {
1853 unsigned long idle_point;
1854
1855 /* A driver can override the migration
1856 * decision if the HW reports that the
1857 * bucket is actually not idle. Therefore
1858 * remark the bucket as busy again and
1859 * update the deadline.
1860 */
1861 nh_res_bucket_set_busy(bucket);
1862 idle_point = nh_res_bucket_idle_point(res_table,
1863 bucket,
1864 now);
1865 nh_res_time_set_deadline(idle_point, &deadline);
1866 }
1867 }
1868 }
1869
1870 /* If the group is still unbalanced, schedule the next upkeep to
1871 * either the deadline computed above, or the minimum deadline,
1872 * whichever comes later.
1873 */
1874 if (!nh_res_table_is_balanced(res_table)) {
1875 unsigned long now = jiffies;
1876 unsigned long min_deadline;
1877
1878 min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
1879 if (time_before(deadline, min_deadline))
1880 deadline = min_deadline;
1881
1882 queue_delayed_work(system_power_efficient_wq,
1883 &res_table->upkeep_dw, deadline - now);
1884 }
1885 }
1886
1887 static void nh_res_table_upkeep_dw(struct work_struct *work)
1888 {
1889 struct delayed_work *dw = to_delayed_work(work);
1890 struct nh_res_table *res_table;
1891
1892 res_table = container_of(dw, struct nh_res_table, upkeep_dw);
1893 nh_res_table_upkeep(res_table, true, true);
1894 }
1895
1896 static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
1897 {
1898 cancel_delayed_work_sync(&res_table->upkeep_dw);
1899 }
1900
1901 static void nh_res_group_rebalance(struct nh_group *nhg,
1902 struct nh_res_table *res_table)
1903 {
1904 u16 prev_upper_bound = 0;
1905 u32 total = 0;
1906 u32 w = 0;
1907 int i;
1908
1909 INIT_LIST_HEAD(&res_table->uw_nh_entries);
1910
1911 for (i = 0; i < nhg->num_nh; ++i)
1912 total += nhg->nh_entries[i].weight;
1913
1914 for (i = 0; i < nhg->num_nh; ++i) {
1915 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1916 u16 upper_bound;
1917 u64 btw;
1918
1919 w += nhge->weight;
1920 btw = ((u64)res_table->num_nh_buckets) * w;
1921 upper_bound = DIV_ROUND_CLOSEST_ULL(btw, total);
1922 nhge->res.wants_buckets = upper_bound - prev_upper_bound;
1923 prev_upper_bound = upper_bound;
1924
1925 if (nh_res_nhge_is_uw(nhge)) {
1926 if (list_empty(&res_table->uw_nh_entries))
1927 res_table->unbalanced_since = jiffies;
1928 list_add(&nhge->res.uw_nh_entry,
1929 &res_table->uw_nh_entries);
1930 }
1931 }
1932 }
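/* Worked example of the apportionment above: with num_nh_buckets = 8
 * and two entries of weights 1 and 3, total = 4.
 *   entry 0: w = 1, btw = 8 * 1 = 8,  upper_bound = 8 / 4 = 2,  wants 2
 *   entry 1: w = 4, btw = 8 * 4 = 32, upper_bound = 32 / 4 = 8, wants 6
 * Entries that currently hold fewer buckets than they want are
 * underweight; they sit on uw_nh_entries until the upkeep work migrates
 * idle buckets over to them.
 */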
1933
1934 /* Migrate buckets in res_table so that they reference NHGE's from NHG with
1935 * the right NH ID. Set those buckets that do not have a corresponding NHGE
1936 * entry in NHG as not occupied.
1937 */
1938 static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1939 struct nh_group *nhg)
1940 {
1941 u16 i;
1942
1943 for (i = 0; i < res_table->num_nh_buckets; i++) {
1944 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1945 u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1946 bool found = false;
1947 int j;
1948
1949 for (j = 0; j < nhg->num_nh; j++) {
1950 struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1951
1952 if (nhge->nh->id == id) {
1953 nh_res_bucket_set_nh(bucket, nhge);
1954 found = true;
1955 break;
1956 }
1957 }
1958
1959 if (!found)
1960 nh_res_bucket_unset_nh(bucket);
1961 }
1962 }
1963
1964 static void replace_nexthop_grp_res(struct nh_group *oldg,
1965 struct nh_group *newg)
1966 {
1967 /* For NH group replacement, the new NHG might only have a stub
1968 * hash table with 0 buckets, because the number of buckets was not
1969 * specified. For NH removal, oldg and newg both reference the same
1970 * res_table. So in any case, in the following, we want to work
1971 * with oldg->res_table.
1972 */
1973 struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1974 unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1975 bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1976
1977 nh_res_table_cancel_upkeep(old_res_table);
1978 nh_res_table_migrate_buckets(old_res_table, newg);
1979 nh_res_group_rebalance(newg, old_res_table);
1980 if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1981 old_res_table->unbalanced_since = prev_unbalanced_since;
1982 nh_res_table_upkeep(old_res_table, true, false);
1983 }
1984
1985 static void nh_hthr_group_rebalance(struct nh_group *nhg)
1986 {
1987 u32 total = 0;
1988 u32 w = 0;
1989 int i;
1990
1991 for (i = 0; i < nhg->num_nh; ++i)
1992 total += nhg->nh_entries[i].weight;
1993
1994 for (i = 0; i < nhg->num_nh; ++i) {
1995 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1996 u32 upper_bound;
1997
1998 w += nhge->weight;
1999 upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
2000 atomic_set(&nhge->hthr.upper_bound, upper_bound);
2001 }
2002 }
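/* Worked example: for two entries of equal weight, total = 2, so
 * entry 0 gets upper_bound = (1ULL << 31) / 2 - 1 = 0x3fffffff and
 * entry 1 gets (2ULL << 31) / 2 - 1 = 0x7fffffff. A 31-bit flow hash h
 * selects the first entry whose upper_bound is >= h, splitting the
 * hash space evenly between the two.
 */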
2003
2004 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
2005 struct nl_info *nlinfo,
2006 struct list_head *deferred_free)
2007 {
2008 struct nh_grp_entry *nhges, *new_nhges;
2009 struct nexthop *nhp = nhge->nh_parent;
2010 struct netlink_ext_ack extack;
2011 struct nexthop *nh = nhge->nh;
2012 struct nh_group *nhg, *newg;
2013 int i, j, err;
2014
2015 WARN_ON(!nh);
2016
2017 nhg = rtnl_dereference(nhp->nh_grp);
2018 newg = nhg->spare;
2019
2020 /* last entry, keep it visible and remove the parent */
2021 if (nhg->num_nh == 1) {
2022 remove_nexthop(net, nhp, nlinfo);
2023 return;
2024 }
2025
2026 newg->has_v4 = false;
2027 newg->is_multipath = nhg->is_multipath;
2028 newg->hash_threshold = nhg->hash_threshold;
2029 newg->resilient = nhg->resilient;
2030 newg->fdb_nh = nhg->fdb_nh;
2031 newg->num_nh = nhg->num_nh;
2032
2033 /* copy old entries to new except the one getting removed */
2034 nhges = nhg->nh_entries;
2035 new_nhges = newg->nh_entries;
2036 for (i = 0, j = 0; i < nhg->num_nh; ++i) {
2037 struct nh_info *nhi;
2038
2039 /* current nexthop getting removed */
2040 if (nhg->nh_entries[i].nh == nh) {
2041 newg->num_nh--;
2042 continue;
2043 }
2044
2045 nhi = rtnl_dereference(nhges[i].nh->nh_info);
2046 if (nhi->family == AF_INET)
2047 newg->has_v4 = true;
2048
2049 list_del(&nhges[i].nh_list);
2050 new_nhges[j].stats = nhges[i].stats;
2051 new_nhges[j].nh_parent = nhges[i].nh_parent;
2052 new_nhges[j].nh = nhges[i].nh;
2053 new_nhges[j].weight = nhges[i].weight;
2054 list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
2055 j++;
2056 }
2057
2058 if (newg->hash_threshold)
2059 nh_hthr_group_rebalance(newg);
2060 else if (newg->resilient)
2061 replace_nexthop_grp_res(nhg, newg);
2062
2063 rcu_assign_pointer(nhp->nh_grp, newg);
2064
2065 list_del(&nhge->nh_list);
2066 nexthop_put(nhge->nh);
2067 list_add(&nhge->nh_list, deferred_free);
2068
2069 /* Removal of a NH from a resilient group is notified through
2070 * bucket notifications.
2071 */
2072 if (newg->hash_threshold) {
2073 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
2074 &extack);
2075 if (err)
2076 pr_err("%s\n", extack._msg);
2077 }
2078
2079 if (nlinfo)
2080 nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
2081 }
2082
2083 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
2084 struct nl_info *nlinfo)
2085 {
2086 struct nh_grp_entry *nhge, *tmp;
2087 LIST_HEAD(deferred_free);
2088
2089 /* If there is nothing to do, let's avoid the costly call to
2090 * synchronize_net()
2091 */
2092 if (list_empty(&nh->grp_list))
2093 return;
2094
2095 list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
2096 remove_nh_grp_entry(net, nhge, nlinfo, &deferred_free);
2097
2098 /* make sure all see the newly published array before releasing rtnl */
2099 synchronize_net();
2100
2101 /* Now that all RCU readers are done, it is safe to free the percpu stats */
2102 list_for_each_entry_safe(nhge, tmp, &deferred_free, nh_list) {
2103 list_del(&nhge->nh_list);
2104 free_percpu(nhge->stats);
2105 }
2106 }
2107
2108 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
2109 {
2110 struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
2111 struct nh_res_table *res_table;
2112 int i, num_nh = nhg->num_nh;
2113
2114 for (i = 0; i < num_nh; ++i) {
2115 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2116
2117 if (WARN_ON(!nhge->nh))
2118 continue;
2119
2120 list_del_init(&nhge->nh_list);
2121 }
2122
2123 if (nhg->resilient) {
2124 res_table = rtnl_dereference(nhg->res_table);
2125 nh_res_table_cancel_upkeep(res_table);
2126 }
2127 }
2128
2129 /* not called for nexthop replace */
2130 static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
2131 {
2132 struct fib6_info *f6i;
2133 bool do_flush = false;
2134 struct fib_info *fi;
2135
2136 list_for_each_entry(fi, &nh->fi_list, nh_list) {
2137 fi->fib_flags |= RTNH_F_DEAD;
2138 do_flush = true;
2139 }
2140 if (do_flush)
2141 fib_flush(net);
2142
2143 spin_lock_bh(&nh->lock);
2144
2145 nh->dead = true;
2146
2147 while (!list_empty(&nh->f6i_list)) {
2148 f6i = list_first_entry(&nh->f6i_list, typeof(*f6i), nh_list);
2149
2150 /* __ip6_del_rt does a release, so do a hold here */
2151 fib6_info_hold(f6i);
2152
2153 spin_unlock_bh(&nh->lock);
2154 ipv6_stub->ip6_del_rt(net, f6i,
2155 !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
2156
2157 spin_lock_bh(&nh->lock);
2158 }
2159
2160 spin_unlock_bh(&nh->lock);
2161 }
2162
2163 static void __remove_nexthop(struct net *net, struct nexthop *nh,
2164 struct nl_info *nlinfo)
2165 {
2166 __remove_nexthop_fib(net, nh);
2167
2168 if (nh->is_group) {
2169 remove_nexthop_group(nh, nlinfo);
2170 } else {
2171 struct nh_info *nhi;
2172
2173 nhi = rtnl_dereference(nh->nh_info);
2174 if (nhi->fib_nhc.nhc_dev)
2175 hlist_del(&nhi->dev_hash);
2176
2177 remove_nexthop_from_groups(net, nh, nlinfo);
2178 }
2179 }
2180
2181 static void remove_nexthop(struct net *net, struct nexthop *nh,
2182 struct nl_info *nlinfo)
2183 {
2184 call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
2185
2186 /* remove from the tree */
2187 rb_erase(&nh->rb_node, &net->nexthop.rb_root);
2188
2189 if (nlinfo)
2190 nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
2191
2192 __remove_nexthop(net, nh, nlinfo);
2193 nh_base_seq_inc(net);
2194
2195 nexthop_put(nh);
2196 }
2197
2198 /* if any FIB entries reference this nexthop, any dst entries
2199 * need to be regenerated
2200 */
2201 static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
2202 struct nexthop *replaced_nh)
2203 {
2204 struct fib6_info *f6i;
2205 struct nh_group *nhg;
2206 int i;
2207
2208 if (!list_empty(&nh->fi_list))
2209 rt_cache_flush(net);
2210
2211 list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2212 ipv6_stub->fib6_update_sernum(net, f6i);
2213
2214 /* if an IPv6 group was replaced, we have to release all old
2215 * dsts to make sure all refcounts are released
2216 */
2217 if (!replaced_nh->is_group)
2218 return;
2219
2220 nhg = rtnl_dereference(replaced_nh->nh_grp);
2221 for (i = 0; i < nhg->num_nh; i++) {
2222 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2223 struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
2224
2225 if (nhi->family == AF_INET6)
2226 ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
2227 }
2228 }
2229
2230 static int replace_nexthop_grp(struct net *net, struct nexthop *old,
2231 struct nexthop *new, const struct nh_config *cfg,
2232 struct netlink_ext_ack *extack)
2233 {
2234 struct nh_res_table *tmp_table = NULL;
2235 struct nh_res_table *new_res_table;
2236 struct nh_res_table *old_res_table;
2237 struct nh_group *oldg, *newg;
2238 int i, err;
2239
2240 if (!new->is_group) {
2241 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
2242 return -EINVAL;
2243 }
2244
2245 oldg = rtnl_dereference(old->nh_grp);
2246 newg = rtnl_dereference(new->nh_grp);
2247
2248 if (newg->hash_threshold != oldg->hash_threshold) {
2249 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
2250 return -EINVAL;
2251 }
2252
2253 if (newg->hash_threshold) {
2254 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
2255 extack);
2256 if (err)
2257 return err;
2258 } else if (newg->resilient) {
2259 new_res_table = rtnl_dereference(newg->res_table);
2260 old_res_table = rtnl_dereference(oldg->res_table);
2261
2262 /* Accept if num_nh_buckets was not given, but if it was
2263 * given, demand that the value be correct.
2264 */
2265 if (cfg->nh_grp_res_has_num_buckets &&
2266 cfg->nh_grp_res_num_buckets !=
2267 old_res_table->num_nh_buckets) {
2268 NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
2269 return -EINVAL;
2270 }
2271
2272 /* Emit a pre-replace notification so that listeners could veto
2273 * a potentially unsupported configuration. Otherwise,
2274 * individual bucket replacement notifications would need to be
2275 * vetoed, which is something that should only happen if the
2276 * bucket is currently active.
2277 */
2278 err = call_nexthop_res_table_notifiers(net, new, extack);
2279 if (err)
2280 return err;
2281
2282 if (cfg->nh_grp_res_has_idle_timer)
2283 old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
2284 if (cfg->nh_grp_res_has_unbalanced_timer)
2285 old_res_table->unbalanced_timer =
2286 cfg->nh_grp_res_unbalanced_timer;
2287
2288 replace_nexthop_grp_res(oldg, newg);
2289
2290 tmp_table = new_res_table;
2291 rcu_assign_pointer(newg->res_table, old_res_table);
2292 rcu_assign_pointer(newg->spare->res_table, old_res_table);
2293 }
2294
2295 /* update parents - used by nexthop code for cleanup */
2296 for (i = 0; i < newg->num_nh; i++)
2297 newg->nh_entries[i].nh_parent = old;
2298
2299 rcu_assign_pointer(old->nh_grp, newg);
2300
2301 /* Make sure concurrent readers are not using 'oldg' anymore. */
2302 synchronize_net();
2303
2304 if (newg->resilient) {
2305 rcu_assign_pointer(oldg->res_table, tmp_table);
2306 rcu_assign_pointer(oldg->spare->res_table, tmp_table);
2307 }
2308
2309 for (i = 0; i < oldg->num_nh; i++)
2310 oldg->nh_entries[i].nh_parent = new;
2311
2312 rcu_assign_pointer(new->nh_grp, oldg);
2313
2314 return 0;
2315 }
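/* Ownership note: after the swaps above, 'old' (which remains in the
 * tree) carries the new group array, while 'new' carries the
 * superseded one, so releasing 'new' in the caller also releases the
 * old array.
 */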
2316
2317 static void nh_group_v4_update(struct nh_group *nhg)
2318 {
2319 struct nh_grp_entry *nhges;
2320 bool has_v4 = false;
2321 int i;
2322
2323 nhges = nhg->nh_entries;
2324 for (i = 0; i < nhg->num_nh; i++) {
2325 struct nh_info *nhi;
2326
2327 nhi = rtnl_dereference(nhges[i].nh->nh_info);
2328 if (nhi->family == AF_INET)
2329 has_v4 = true;
2330 }
2331 nhg->has_v4 = has_v4;
2332 }
2333
2334 static int replace_nexthop_single_notify_res(struct net *net,
2335 struct nh_res_table *res_table,
2336 struct nexthop *old,
2337 struct nh_info *oldi,
2338 struct nh_info *newi,
2339 struct netlink_ext_ack *extack)
2340 {
2341 u32 nhg_id = res_table->nhg_id;
2342 int err;
2343 u16 i;
2344
2345 for (i = 0; i < res_table->num_nh_buckets; i++) {
2346 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2347 struct nh_grp_entry *nhge;
2348
2349 nhge = rtnl_dereference(bucket->nh_entry);
2350 if (nhge->nh == old) {
2351 err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
2352 i, true,
2353 oldi, newi,
2354 extack);
2355 if (err)
2356 goto err_notify;
2357 }
2358 }
2359
2360 return 0;
2361
2362 err_notify:
2363 while (i-- > 0) {
2364 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2365 struct nh_grp_entry *nhge;
2366
2367 nhge = rtnl_dereference(bucket->nh_entry);
2368 if (nhge->nh == old)
2369 __call_nexthop_res_bucket_notifiers(net, nhg_id, i,
2370 true, newi, oldi,
2371 extack);
2372 }
2373 return err;
2374 }
2375
2376 static int replace_nexthop_single_notify(struct net *net,
2377 struct nexthop *group_nh,
2378 struct nexthop *old,
2379 struct nh_info *oldi,
2380 struct nh_info *newi,
2381 struct netlink_ext_ack *extack)
2382 {
2383 struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
2384 struct nh_res_table *res_table;
2385
2386 if (nhg->hash_threshold) {
2387 return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
2388 group_nh, extack);
2389 } else if (nhg->resilient) {
2390 res_table = rtnl_dereference(nhg->res_table);
2391 return replace_nexthop_single_notify_res(net, res_table,
2392 old, oldi, newi,
2393 extack);
2394 }
2395
2396 return -EINVAL;
2397 }
2398
2399 static int replace_nexthop_single(struct net *net, struct nexthop *old,
2400 struct nexthop *new,
2401 struct netlink_ext_ack *extack)
2402 {
2403 u8 old_protocol, old_nh_flags;
2404 struct nh_info *oldi, *newi;
2405 struct nh_grp_entry *nhge;
2406 int err;
2407
2408 if (new->is_group) {
2409 NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
2410 return -EINVAL;
2411 }
2412
2413 if (!list_empty(&old->grp_list) &&
2414 rtnl_dereference(new->nh_info)->fdb_nh !=
2415 rtnl_dereference(old->nh_info)->fdb_nh) {
2416 NL_SET_ERR_MSG(extack, "Cannot change nexthop FDB status while in a group");
2417 return -EINVAL;
2418 }
2419
2420 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
2421 if (err)
2422 return err;
2423
2424 /* Hardware flags were set on 'old' as 'new' is not in the red-black
2425 * tree. Therefore, inherit the flags from 'old' to 'new'.
2426 */
2427 new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
2428
2429 oldi = rtnl_dereference(old->nh_info);
2430 newi = rtnl_dereference(new->nh_info);
2431
2432 newi->nh_parent = old;
2433 oldi->nh_parent = new;
2434
2435 old_protocol = old->protocol;
2436 old_nh_flags = old->nh_flags;
2437
2438 old->protocol = new->protocol;
2439 old->nh_flags = new->nh_flags;
2440
2441 rcu_assign_pointer(old->nh_info, newi);
2442 rcu_assign_pointer(new->nh_info, oldi);
2443
2444 /* Send a replace notification for all the groups using the nexthop. */
2445 list_for_each_entry(nhge, &old->grp_list, nh_list) {
2446 struct nexthop *nhp = nhge->nh_parent;
2447
2448 err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
2449 extack);
2450 if (err)
2451 goto err_notify;
2452 }
2453
2454 /* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
2455 * update IPv4 indication in all the groups using the nexthop.
2456 */
2457 if (oldi->family == AF_INET && newi->family == AF_INET6) {
2458 list_for_each_entry(nhge, &old->grp_list, nh_list) {
2459 struct nexthop *nhp = nhge->nh_parent;
2460 struct nh_group *nhg;
2461
2462 nhg = rtnl_dereference(nhp->nh_grp);
2463 nh_group_v4_update(nhg);
2464 }
2465 }
2466
2467 return 0;
2468
2469 err_notify:
2470 rcu_assign_pointer(new->nh_info, newi);
2471 rcu_assign_pointer(old->nh_info, oldi);
2472 old->nh_flags = old_nh_flags;
2473 old->protocol = old_protocol;
2474 oldi->nh_parent = old;
2475 newi->nh_parent = new;
2476 list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
2477 struct nexthop *nhp = nhge->nh_parent;
2478
2479 replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
2480 }
2481 call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
2482 return err;
2483 }
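/* From user space the nh_info swap above is invisible: the nexthop ID
 * is stable and only the backing configuration changes. A minimal
 * iproute2 sketch (addresses purely illustrative):
 *
 *   ip nexthop add id 7 via 192.0.2.1 dev eth0
 *   ip nexthop replace id 7 via 192.0.2.99 dev eth0
 *
 * Routes and groups that reference id 7 keep their reference and
 * observe the new gateway without being re-created.
 */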
2484
2485 static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
2486 struct nl_info *info)
2487 {
2488 struct fib6_info *f6i;
2489
2490 if (!list_empty(&nh->fi_list)) {
2491 struct fib_info *fi;
2492
2493 /* expectation is a few fib_info per nexthop and then
2494 * a lot of routes per fib_info. So mark the fib_info
2495 * and then walk the fib tables once
2496 */
2497 list_for_each_entry(fi, &nh->fi_list, nh_list)
2498 fi->nh_updated = true;
2499
2500 fib_info_notify_update(net, info);
2501
2502 list_for_each_entry(fi, &nh->fi_list, nh_list)
2503 fi->nh_updated = false;
2504 }
2505
2506 list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2507 ipv6_stub->fib6_rt_update(net, f6i, info);
2508 }
2509
2510 /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2511 * linked to this nexthop and for all groups that the nexthop
2512 * is a member of
2513 */
2514 static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2515 struct nl_info *info)
2516 {
2517 struct nh_grp_entry *nhge;
2518
2519 __nexthop_replace_notify(net, nh, info);
2520
2521 list_for_each_entry(nhge, &nh->grp_list, nh_list)
2522 __nexthop_replace_notify(net, nhge->nh_parent, info);
2523 }
2524
2525 static int replace_nexthop(struct net *net, struct nexthop *old,
2526 struct nexthop *new, const struct nh_config *cfg,
2527 struct netlink_ext_ack *extack)
2528 {
2529 bool new_is_reject = false;
2530 struct nh_grp_entry *nhge;
2531 int err;
2532
2533 /* check that existing FIB entries are ok with the
2534 * new nexthop definition
2535 */
2536 err = fib_check_nh_list(old, new, extack);
2537 if (err)
2538 return err;
2539
2540 err = fib6_check_nh_list(old, new, extack);
2541 if (err)
2542 return err;
2543
2544 if (!new->is_group) {
2545 struct nh_info *nhi = rtnl_dereference(new->nh_info);
2546
2547 new_is_reject = nhi->reject_nh;
2548 }
2549
2550 list_for_each_entry(nhge, &old->grp_list, nh_list) {
2551 /* if new nexthop is a blackhole, any groups using this
2552 * nexthop cannot have more than 1 path
2553 */
2554 if (new_is_reject &&
2555 nexthop_num_path(nhge->nh_parent) > 1) {
2556 NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2557 return -EINVAL;
2558 }
2559
2560 err = fib_check_nh_list(nhge->nh_parent, new, extack);
2561 if (err)
2562 return err;
2563
2564 err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2565 if (err)
2566 return err;
2567 }
2568
2569 if (old->is_group)
2570 err = replace_nexthop_grp(net, old, new, cfg, extack);
2571 else
2572 err = replace_nexthop_single(net, old, new, extack);
2573
2574 if (!err) {
2575 nh_rt_cache_flush(net, old, new);
2576
2577 __remove_nexthop(net, new, NULL);
2578 nexthop_put(new);
2579 }
2580
2581 return err;
2582 }
2583
2584 /* called with rtnl_lock held */
2585 static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2586 struct nh_config *cfg, struct netlink_ext_ack *extack)
2587 {
2588 struct rb_node **pp, *parent = NULL, *next;
2589 struct rb_root *root = &net->nexthop.rb_root;
2590 bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2591 bool create = !!(cfg->nlflags & NLM_F_CREATE);
2592 u32 new_id = new_nh->id;
2593 int replace_notify = 0;
2594 int rc = -EEXIST;
2595
2596 pp = &root->rb_node;
2597 while (1) {
2598 struct nexthop *nh;
2599
2600 next = *pp;
2601 if (!next)
2602 break;
2603
2604 parent = next;
2605
2606 nh = rb_entry(parent, struct nexthop, rb_node);
2607 if (new_id < nh->id) {
2608 pp = &next->rb_left;
2609 } else if (new_id > nh->id) {
2610 pp = &next->rb_right;
2611 } else if (replace) {
2612 rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2613 if (!rc) {
2614 new_nh = nh; /* send notification with old nh */
2615 replace_notify = 1;
2616 }
2617 goto out;
2618 } else {
2619 /* id already exists and not a replace */
2620 goto out;
2621 }
2622 }
2623
2624 if (replace && !create) {
2625 NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2626 rc = -ENOENT;
2627 goto out;
2628 }
2629
2630 if (new_nh->is_group) {
2631 struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2632 struct nh_res_table *res_table;
2633
2634 if (nhg->resilient) {
2635 res_table = rtnl_dereference(nhg->res_table);
2636
2637 /* Not passing the number of buckets is OK when
2638 * replacing, but not when creating a new group.
2639 */
2640 if (!cfg->nh_grp_res_has_num_buckets) {
2641 NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2642 rc = -EINVAL;
2643 goto out;
2644 }
2645
2646 nh_res_group_rebalance(nhg, res_table);
2647
2648 /* Do not send bucket notifications, we do full
2649 * notification below.
2650 */
2651 nh_res_table_upkeep(res_table, false, false);
2652 }
2653 }
2654
2655 rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2656 rb_insert_color(&new_nh->rb_node, root);
2657
2658 /* The initial insertion is a full notification for hash-threshold as
2659 * well as resilient groups.
2660 */
2661 rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2662 if (rc)
2663 rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2664
2665 out:
2666 if (!rc) {
2667 nh_base_seq_inc(net);
2668 nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2669 if (replace_notify &&
2670 READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
2671 nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2672 }
2673
2674 return rc;
2675 }
2676
2677 /* rtnl */
2678 /* remove all nexthops tied to a device being deleted */
2679 static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2680 {
2681 unsigned int hash = nh_dev_hashfn(dev->ifindex);
2682 struct net *net = dev_net(dev);
2683 struct hlist_head *head = &net->nexthop.devhash[hash];
2684 struct hlist_node *n;
2685 struct nh_info *nhi;
2686
2687 hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2688 if (nhi->fib_nhc.nhc_dev != dev)
2689 continue;
2690
2691 if (nhi->reject_nh &&
2692 (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2693 continue;
2694
2695 remove_nexthop(net, nhi->nh_parent, NULL);
2696 }
2697 }
2698
2699 /* rtnl; called when net namespace is deleted */
2700 static void flush_all_nexthops(struct net *net)
2701 {
2702 struct rb_root *root = &net->nexthop.rb_root;
2703 struct rb_node *node;
2704 struct nexthop *nh;
2705
2706 while ((node = rb_first(root))) {
2707 nh = rb_entry(node, struct nexthop, rb_node);
2708 remove_nexthop(net, nh, NULL);
2709 cond_resched();
2710 }
2711 }
2712
2713 static struct nexthop *nexthop_create_group(struct net *net,
2714 struct nh_config *cfg)
2715 {
2716 struct nlattr *grps_attr = cfg->nh_grp;
2717 struct nexthop_grp *entry = nla_data(grps_attr);
2718 u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2719 struct nh_group *nhg;
2720 struct nexthop *nh;
2721 int err;
2722 int i;
2723
2724 nh = nexthop_alloc();
2725 if (!nh)
2726 return ERR_PTR(-ENOMEM);
2727
2728 nh->is_group = 1;
2729
2730 nhg = nexthop_grp_alloc(num_nh);
2731 if (!nhg) {
2732 kfree(nh);
2733 return ERR_PTR(-ENOMEM);
2734 }
2735
2736 /* spare group used for removals */
2737 nhg->spare = nexthop_grp_alloc(num_nh);
2738 if (!nhg->spare) {
2739 kfree(nhg);
2740 kfree(nh);
2741 return ERR_PTR(-ENOMEM);
2742 }
2743 nhg->spare->spare = nhg;
2744
2745 for (i = 0; i < nhg->num_nh; ++i) {
2746 struct nexthop *nhe;
2747 struct nh_info *nhi;
2748
2749 nhe = nexthop_find_by_id(net, entry[i].id);
2750 if (!nexthop_get(nhe)) {
2751 err = -ENOENT;
2752 goto out_no_nh;
2753 }
2754
2755 nhi = rtnl_dereference(nhe->nh_info);
2756 if (nhi->family == AF_INET)
2757 nhg->has_v4 = true;
2758
2759 nhg->nh_entries[i].stats =
2760 netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
2761 if (!nhg->nh_entries[i].stats) {
2762 err = -ENOMEM;
2763 nexthop_put(nhe);
2764 goto out_no_nh;
2765 }
2766 nhg->nh_entries[i].nh = nhe;
2767 nhg->nh_entries[i].weight = nexthop_grp_weight(&entry[i]);
2768
2769 list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2770 nhg->nh_entries[i].nh_parent = nh;
2771 }
2772
2773 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2774 nhg->hash_threshold = 1;
2775 nhg->is_multipath = true;
2776 } else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2777 struct nh_res_table *res_table;
2778
2779 res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2780 if (!res_table) {
2781 err = -ENOMEM;
2782 goto out_no_nh;
2783 }
2784
2785 rcu_assign_pointer(nhg->spare->res_table, res_table);
2786 rcu_assign_pointer(nhg->res_table, res_table);
2787 nhg->resilient = true;
2788 nhg->is_multipath = true;
2789 }
2790
2791 WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);
2792
2793 if (nhg->hash_threshold)
2794 nh_hthr_group_rebalance(nhg);
2795
2796 if (cfg->nh_fdb)
2797 nhg->fdb_nh = 1;
2798
2799 if (cfg->nh_hw_stats)
2800 nhg->hw_stats = true;
2801
2802 rcu_assign_pointer(nh->nh_grp, nhg);
2803
2804 return nh;
2805
2806 out_no_nh:
2807 for (i--; i >= 0; --i) {
2808 list_del(&nhg->nh_entries[i].nh_list);
2809 free_percpu(nhg->nh_entries[i].stats);
2810 nexthop_put(nhg->nh_entries[i].nh);
2811 }
2812
2813 kfree(nhg->spare);
2814 kfree(nhg);
2815 kfree(nh);
2816
2817 return ERR_PTR(err);
2818 }
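/* A hash-threshold group as built above corresponds to an iproute2
 * invocation along the lines of (IDs and weights illustrative):
 *
 *   ip nexthop add id 100 group 1,3/2,1
 *
 * i.e. a two-entry NHA_GROUP where nexthop 1 is intended to carry
 * roughly three quarters of the flows and nexthop 2 one quarter.
 */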
2819
2820 static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2821 struct nh_info *nhi, struct nh_config *cfg,
2822 struct netlink_ext_ack *extack)
2823 {
2824 struct fib_nh *fib_nh = &nhi->fib_nh;
2825 struct fib_config fib_cfg = {
2826 .fc_oif = cfg->nh_ifindex,
2827 .fc_gw4 = cfg->gw.ipv4,
2828 .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2829 .fc_flags = cfg->nh_flags,
2830 .fc_nlinfo = cfg->nlinfo,
2831 .fc_encap = cfg->nh_encap,
2832 .fc_encap_type = cfg->nh_encap_type,
2833 };
2834 u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2835 int err;
2836
2837 err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2838 if (err) {
2839 fib_nh_release(net, fib_nh);
2840 goto out;
2841 }
2842
2843 if (nhi->fdb_nh)
2844 goto out;
2845
2846 /* sets nh_dev if successful */
2847 err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2848 if (!err) {
2849 nh->nh_flags = fib_nh->fib_nh_flags;
2850 fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2851 !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
2852 } else {
2853 fib_nh_release(net, fib_nh);
2854 }
2855 out:
2856 return err;
2857 }
2858
2859 static int nh_create_ipv6(struct net *net, struct nexthop *nh,
2860 struct nh_info *nhi, struct nh_config *cfg,
2861 struct netlink_ext_ack *extack)
2862 {
2863 struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2864 struct fib6_config fib6_cfg = {
2865 .fc_table = l3mdev_fib_table(cfg->dev),
2866 .fc_ifindex = cfg->nh_ifindex,
2867 .fc_gateway = cfg->gw.ipv6,
2868 .fc_flags = cfg->nh_flags,
2869 .fc_nlinfo = cfg->nlinfo,
2870 .fc_encap = cfg->nh_encap,
2871 .fc_encap_type = cfg->nh_encap_type,
2872 .fc_is_fdb = cfg->nh_fdb,
2873 };
2874 int err;
2875
2876 if (!ipv6_addr_any(&cfg->gw.ipv6))
2877 fib6_cfg.fc_flags |= RTF_GATEWAY;
2878
2879 /* sets nh_dev if successful */
2880 err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2881 extack);
2882 if (err) {
2883 /* IPv6 is not enabled, don't call fib6_nh_release */
2884 if (err == -EAFNOSUPPORT)
2885 goto out;
2886 ipv6_stub->fib6_nh_release(fib6_nh);
2887 } else {
2888 nh->nh_flags = fib6_nh->fib_nh_flags;
2889 }
2890 out:
2891 return err;
2892 }
2893
2894 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2895 struct netlink_ext_ack *extack)
2896 {
2897 struct nh_info *nhi;
2898 struct nexthop *nh;
2899 int err = 0;
2900
2901 nh = nexthop_alloc();
2902 if (!nh)
2903 return ERR_PTR(-ENOMEM);
2904
2905 nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2906 if (!nhi) {
2907 kfree(nh);
2908 return ERR_PTR(-ENOMEM);
2909 }
2910
2911 nh->nh_flags = cfg->nh_flags;
2912 nh->net = net;
2913
2914 nhi->nh_parent = nh;
2915 nhi->family = cfg->nh_family;
2916 nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2917
2918 if (cfg->nh_fdb)
2919 nhi->fdb_nh = 1;
2920
2921 if (cfg->nh_blackhole) {
2922 nhi->reject_nh = 1;
2923 cfg->nh_ifindex = net->loopback_dev->ifindex;
2924 }
2925
2926 switch (cfg->nh_family) {
2927 case AF_INET:
2928 err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2929 break;
2930 case AF_INET6:
2931 err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2932 break;
2933 }
2934
2935 if (err) {
2936 kfree(nhi);
2937 kfree(nh);
2938 return ERR_PTR(err);
2939 }
2940
2941 /* add the entry to the device based hash */
2942 if (!nhi->fdb_nh)
2943 nexthop_devhash_add(net, nhi);
2944
2945 rcu_assign_pointer(nh->nh_info, nhi);
2946
2947 return nh;
2948 }
2949
2950 /* called with rtnl lock held */
2951 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2952 struct netlink_ext_ack *extack)
2953 {
2954 struct nexthop *nh;
2955 int err;
2956
2957 if (!cfg->nh_id) {
2958 cfg->nh_id = nh_find_unused_id(net);
2959 if (!cfg->nh_id) {
2960 NL_SET_ERR_MSG(extack, "No unused id");
2961 return ERR_PTR(-EINVAL);
2962 }
2963 }
2964
2965 if (cfg->nh_grp)
2966 nh = nexthop_create_group(net, cfg);
2967 else
2968 nh = nexthop_create(net, cfg, extack);
2969
2970 if (IS_ERR(nh))
2971 return nh;
2972
2973 refcount_set(&nh->refcnt, 1);
2974 nh->id = cfg->nh_id;
2975 nh->protocol = cfg->nh_protocol;
2976 nh->net = net;
2977
2978 err = insert_nexthop(net, nh, cfg, extack);
2979 if (err) {
2980 __remove_nexthop(net, nh, NULL);
2981 nexthop_put(nh);
2982 nh = ERR_PTR(err);
2983 }
2984
2985 return nh;
2986 }
2987
2988 static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
2989 unsigned long *timer_p, bool *has_p,
2990 struct netlink_ext_ack *extack)
2991 {
2992 unsigned long timer;
2993 u32 value;
2994
2995 if (!attr) {
2996 *timer_p = fallback;
2997 *has_p = false;
2998 return 0;
2999 }
3000
3001 value = nla_get_u32(attr);
3002 timer = clock_t_to_jiffies(value);
3003 if (timer == ~0UL) {
3004 NL_SET_ERR_MSG(extack, "Timer value too large");
3005 return -EINVAL;
3006 }
3007
3008 *timer_p = timer;
3009 *has_p = true;
3010 return 0;
3011 }
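/* Example of the conversion above, assuming the usual USER_HZ of 100:
 * a netlink value of 12000 means 12000 clock_t ticks, i.e. 120 seconds,
 * so clock_t_to_jiffies() returns 120 * HZ, matching the
 * NH_RES_DEFAULT_IDLE_TIMER of two minutes.
 */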
3012
3013 static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
3014 struct netlink_ext_ack *extack)
3015 {
3016 struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
3017 int err;
3018
3019 if (res) {
3020 err = nla_parse_nested(tb,
3021 ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
3022 res, rtm_nh_res_policy_new, extack);
3023 if (err < 0)
3024 return err;
3025 }
3026
3027 if (tb[NHA_RES_GROUP_BUCKETS]) {
3028 cfg->nh_grp_res_num_buckets =
3029 nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
3030 cfg->nh_grp_res_has_num_buckets = true;
3031 if (!cfg->nh_grp_res_num_buckets) {
3032 NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
3033 return -EINVAL;
3034 }
3035 }
3036
3037 err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
3038 NH_RES_DEFAULT_IDLE_TIMER,
3039 &cfg->nh_grp_res_idle_timer,
3040 &cfg->nh_grp_res_has_idle_timer,
3041 extack);
3042 if (err)
3043 return err;
3044
3045 return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
3046 NH_RES_DEFAULT_UNBALANCED_TIMER,
3047 &cfg->nh_grp_res_unbalanced_timer,
3048 &cfg->nh_grp_res_has_unbalanced_timer,
3049 extack);
3050 }
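/* The resilient-group attributes parsed above map to an iproute2
 * invocation along the lines of (values illustrative):
 *
 *   ip nexthop add id 10 group 1/2 type resilient buckets 32 \
 *           idle_timer 120 unbalanced_timer 300
 *
 * where "buckets" becomes NHA_RES_GROUP_BUCKETS and the two timers,
 * given in seconds, become NHA_RES_GROUP_IDLE_TIMER and
 * NHA_RES_GROUP_UNBALANCED_TIMER.
 */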
3051
3052 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
3053 struct nlmsghdr *nlh, struct nlattr **tb,
3054 struct nh_config *cfg,
3055 struct netlink_ext_ack *extack)
3056 {
3057 struct nhmsg *nhm = nlmsg_data(nlh);
3058 int err;
3059
3060 err = -EINVAL;
3061 if (nhm->resvd || nhm->nh_scope) {
3062 NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
3063 goto out;
3064 }
3065 if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
3066 NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
3067 goto out;
3068 }
3069
3070 switch (nhm->nh_family) {
3071 case AF_INET:
3072 case AF_INET6:
3073 break;
3074 case AF_UNSPEC:
3075 if (tb[NHA_GROUP])
3076 break;
3077 fallthrough;
3078 default:
3079 NL_SET_ERR_MSG(extack, "Invalid address family");
3080 goto out;
3081 }
3082
3083 memset(cfg, 0, sizeof(*cfg));
3084 cfg->nlflags = nlh->nlmsg_flags;
3085 cfg->nlinfo.portid = NETLINK_CB(skb).portid;
3086 cfg->nlinfo.nlh = nlh;
3087 cfg->nlinfo.nl_net = net;
3088
3089 cfg->nh_family = nhm->nh_family;
3090 cfg->nh_protocol = nhm->nh_protocol;
3091 cfg->nh_flags = nhm->nh_flags;
3092
3093 if (tb[NHA_ID])
3094 cfg->nh_id = nla_get_u32(tb[NHA_ID]);
3095
3096 if (tb[NHA_FDB]) {
3097 if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
3098 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
3099 NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
3100 goto out;
3101 }
3102 if (nhm->nh_flags) {
3103 NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
3104 goto out;
3105 }
3106 cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
3107 }
3108
3109 if (tb[NHA_GROUP]) {
3110 if (nhm->nh_family != AF_UNSPEC) {
3111 NL_SET_ERR_MSG(extack, "Invalid family for group");
3112 goto out;
3113 }
3114 cfg->nh_grp = tb[NHA_GROUP];
3115
3116 cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
3117 if (tb[NHA_GROUP_TYPE])
3118 cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
3119
3120 if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
3121 NL_SET_ERR_MSG(extack, "Invalid group type");
3122 goto out;
3123 }
3124
3125 err = nh_check_attr_group(net, tb, ARRAY_SIZE(rtm_nh_policy_new),
3126 cfg->nh_grp_type, extack);
3127 if (err)
3128 goto out;
3129
3130 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
3131 err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
3132 cfg, extack);
3133
3134 if (tb[NHA_HW_STATS_ENABLE])
3135 cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
3136
3137 /* no other attributes should be set */
3138 goto out;
3139 }
3140
3141 if (tb[NHA_BLACKHOLE]) {
3142 if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
3143 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
3144 NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
3145 goto out;
3146 }
3147
3148 cfg->nh_blackhole = 1;
3149 err = 0;
3150 goto out;
3151 }
3152
3153 if (!cfg->nh_fdb && !tb[NHA_OIF]) {
3154 NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
3155 goto out;
3156 }
3157
3158 err = -EINVAL;
3159 if (tb[NHA_GATEWAY]) {
3160 struct nlattr *gwa = tb[NHA_GATEWAY];
3161
3162 switch (cfg->nh_family) {
3163 case AF_INET:
3164 if (nla_len(gwa) != sizeof(u32)) {
3165 NL_SET_ERR_MSG(extack, "Invalid gateway");
3166 goto out;
3167 }
3168 cfg->gw.ipv4 = nla_get_be32(gwa);
3169 break;
3170 case AF_INET6:
3171 if (nla_len(gwa) != sizeof(struct in6_addr)) {
3172 NL_SET_ERR_MSG(extack, "Invalid gateway");
3173 goto out;
3174 }
3175 cfg->gw.ipv6 = nla_get_in6_addr(gwa);
3176 break;
3177 default:
3178 NL_SET_ERR_MSG(extack,
3179 "Unknown address family for gateway");
3180 goto out;
3181 }
3182 } else {
3183 /* device only nexthop (no gateway) */
3184 if (cfg->nh_flags & RTNH_F_ONLINK) {
3185 NL_SET_ERR_MSG(extack,
3186 "ONLINK flag can not be set for nexthop without a gateway");
3187 goto out;
3188 }
3189 }
3190
3191 if (tb[NHA_ENCAP]) {
3192 cfg->nh_encap = tb[NHA_ENCAP];
3193
3194 if (!tb[NHA_ENCAP_TYPE]) {
3195 NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
3196 goto out;
3197 }
3198
3199 cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
3200 err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
3201 if (err < 0)
3202 goto out;
3203
3204 } else if (tb[NHA_ENCAP_TYPE]) {
3205 NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
3206 goto out;
3207 }
3208
3209 if (tb[NHA_HW_STATS_ENABLE]) {
3210 NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
3211 goto out;
3212 }
3213
3214 err = 0;
3215 out:
3216 return err;
3217 }
3218
3219 static int rtm_to_nh_config_rtnl(struct net *net, struct nlattr **tb,
3220 struct nh_config *cfg,
3221 struct netlink_ext_ack *extack)
3222 {
3223 if (tb[NHA_GROUP])
3224 return nh_check_attr_group_rtnl(net, tb, extack);
3225
3226 if (tb[NHA_OIF]) {
3227 cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
3228 if (cfg->nh_ifindex)
3229 cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
3230
3231 if (!cfg->dev) {
3232 NL_SET_ERR_MSG(extack, "Invalid device index");
3233 return -EINVAL;
3234 }
3235
3236 if (!(cfg->dev->flags & IFF_UP)) {
3237 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3238 return -ENETDOWN;
3239 }
3240
3241 if (!netif_carrier_ok(cfg->dev)) {
3242 NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
3243 return -ENETDOWN;
3244 }
3245 }
3246
3247 return 0;
3248 }
3249
3250 /* rtnl */
3251 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3252 struct netlink_ext_ack *extack)
3253 {
3254 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
3255 struct net *net = sock_net(skb->sk);
3256 struct nh_config cfg;
3257 struct nexthop *nh;
3258 int err;
3259
3260 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3261 ARRAY_SIZE(rtm_nh_policy_new) - 1,
3262 rtm_nh_policy_new, extack);
3263 if (err < 0)
3264 goto out;
3265
3266 err = rtm_to_nh_config(net, skb, nlh, tb, &cfg, extack);
3267 if (err)
3268 goto out;
3269
3270 if (cfg.nlflags & NLM_F_REPLACE && !cfg.nh_id) {
3271 NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
3272 err = -EINVAL;
3273 goto out;
3274 }
3275
3276 rtnl_net_lock(net);
3277
3278 err = rtm_to_nh_config_rtnl(net, tb, &cfg, extack);
3279 if (err)
3280 goto unlock;
3281
3282 nh = nexthop_add(net, &cfg, extack);
3283 if (IS_ERR(nh))
3284 err = PTR_ERR(nh);
3285
3286 unlock:
3287 rtnl_net_unlock(net);
3288 out:
3289 return err;
3290 }
3291
3292 static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
3293 struct nlattr **tb, u32 *id, u32 *op_flags,
3294 struct netlink_ext_ack *extack)
3295 {
3296 struct nhmsg *nhm = nlmsg_data(nlh);
3297
3298 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3299 NL_SET_ERR_MSG(extack, "Invalid values in header");
3300 return -EINVAL;
3301 }
3302
3303 if (!tb[NHA_ID]) {
3304 NL_SET_ERR_MSG(extack, "Nexthop id is missing");
3305 return -EINVAL;
3306 }
3307
3308 *id = nla_get_u32(tb[NHA_ID]);
3309 if (!(*id)) {
3310 NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3311 return -EINVAL;
3312 }
3313
3314 if (op_flags)
3315 *op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0);
3316
3317 return 0;
3318 }
3319
3320 /* rtnl */
3321 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3322 struct netlink_ext_ack *extack)
3323 {
3324 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
3325 struct net *net = sock_net(skb->sk);
3326 struct nl_info nlinfo = {
3327 .nlh = nlh,
3328 .nl_net = net,
3329 .portid = NETLINK_CB(skb).portid,
3330 };
3331 struct nexthop *nh;
3332 int err;
3333 u32 id;
3334
3335 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3336 ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
3337 extack);
3338 if (err < 0)
3339 return err;
3340
3341 err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
3342 if (err)
3343 return err;
3344
3345 rtnl_net_lock(net);
3346
3347 nh = nexthop_find_by_id(net, id);
3348 if (nh)
3349 remove_nexthop(net, nh, &nlinfo);
3350 else
3351 err = -ENOENT;
3352
3353 rtnl_net_unlock(net);
3354
3355 return err;
3356 }
3357
3358 /* rtnl */
3359 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3360 struct netlink_ext_ack *extack)
3361 {
3362 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
3363 struct net *net = sock_net(in_skb->sk);
3364 struct sk_buff *skb = NULL;
3365 struct nexthop *nh;
3366 u32 op_flags;
3367 int err;
3368 u32 id;
3369
3370 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3371 ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
3372 extack);
3373 if (err < 0)
3374 return err;
3375
3376 err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
3377 if (err)
3378 return err;
3379
3380 err = -ENOBUFS;
3381 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3382 if (!skb)
3383 goto out;
3384
3385 err = -ENOENT;
3386 nh = nexthop_find_by_id(net, id);
3387 if (!nh)
3388 goto errout_free;
3389
3390 err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
3391 nlh->nlmsg_seq, 0, op_flags);
3392 if (err < 0) {
3393 WARN_ON(err == -EMSGSIZE);
3394 goto errout_free;
3395 }
3396
3397 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3398 out:
3399 return err;
3400 errout_free:
3401 kfree_skb(skb);
3402 goto out;
3403 }
3404
3405 struct nh_dump_filter {
3406 u32 nh_id;
3407 int dev_idx;
3408 int master_idx;
3409 bool group_filter;
3410 bool fdb_filter;
3411 u32 res_bucket_nh_id;
3412 u32 op_flags;
3413 };
3414
3415 static bool nh_dump_filtered(struct nexthop *nh,
3416 struct nh_dump_filter *filter, u8 family)
3417 {
3418 const struct net_device *dev;
3419 const struct nh_info *nhi;
3420
3421 if (filter->group_filter && !nh->is_group)
3422 return true;
3423
3424 if (!filter->dev_idx && !filter->master_idx && !family)
3425 return false;
3426
3427 if (nh->is_group)
3428 return true;
3429
3430 nhi = rtnl_dereference(nh->nh_info);
3431 if (family && nhi->family != family)
3432 return true;
3433
3434 dev = nhi->fib_nhc.nhc_dev;
3435 if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
3436 return true;
3437
3438 if (filter->master_idx) {
3439 struct net_device *master;
3440
3441 if (!dev)
3442 return true;
3443
3444 master = netdev_master_upper_dev_get((struct net_device *)dev);
3445 if (!master || master->ifindex != filter->master_idx)
3446 return true;
3447 }
3448
3449 return false;
3450 }
3451
3452 static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
3453 struct nh_dump_filter *filter,
3454 struct netlink_ext_ack *extack)
3455 {
3456 struct nhmsg *nhm;
3457 u32 idx;
3458
3459 if (tb[NHA_OIF]) {
3460 idx = nla_get_u32(tb[NHA_OIF]);
3461 if (idx > INT_MAX) {
3462 NL_SET_ERR_MSG(extack, "Invalid device index");
3463 return -EINVAL;
3464 }
3465 filter->dev_idx = idx;
3466 }
3467 if (tb[NHA_MASTER]) {
3468 idx = nla_get_u32(tb[NHA_MASTER]);
3469 if (idx > INT_MAX) {
3470 NL_SET_ERR_MSG(extack, "Invalid master device index");
3471 return -EINVAL;
3472 }
3473 filter->master_idx = idx;
3474 }
3475 filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
3476 filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
3477
3478 nhm = nlmsg_data(nlh);
3479 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3480 NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
3481 return -EINVAL;
3482 }
3483
3484 return 0;
3485 }
3486
3487 static int nh_valid_dump_req(const struct nlmsghdr *nlh,
3488 struct nh_dump_filter *filter,
3489 struct netlink_callback *cb)
3490 {
3491 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
3492 int err;
3493
3494 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3495 ARRAY_SIZE(rtm_nh_policy_dump) - 1,
3496 rtm_nh_policy_dump, cb->extack);
3497 if (err < 0)
3498 return err;
3499
3500 filter->op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0);
3501
3502 return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3503 }
3504
3505 struct rtm_dump_nh_ctx {
3506 u32 idx;
3507 };
3508
3509 static struct rtm_dump_nh_ctx *
3510 rtm_dump_nh_ctx(struct netlink_callback *cb)
3511 {
3512 struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
3513
3514 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3515 return ctx;
3516 }
3517
3518 static int rtm_dump_walk_nexthops(struct sk_buff *skb,
3519 struct netlink_callback *cb,
3520 struct rb_root *root,
3521 struct rtm_dump_nh_ctx *ctx,
3522 int (*nh_cb)(struct sk_buff *skb,
3523 struct netlink_callback *cb,
3524 struct nexthop *nh, void *data),
3525 void *data)
3526 {
3527 struct rb_node *node;
3528 int s_idx;
3529 int err;
3530
3531 s_idx = ctx->idx;
3532
3533 /* If this is not the first invocation, ctx->idx will contain the id of
3534 * the last nexthop we processed. Instead of starting from the very
3535 * first element of the red/black tree again and linearly skipping the
3536 * (potentially large) set of nodes with an id smaller than s_idx, walk
3537 * the tree and find the left-most node whose id is >= s_idx. This
3538 * provides an efficient O(log n) starting point for the dump
3539 * continuation.
3540 */
3541 if (s_idx != 0) {
3542 struct rb_node *tmp = root->rb_node;
3543
3544 node = NULL;
3545 while (tmp) {
3546 struct nexthop *nh;
3547
3548 nh = rb_entry(tmp, struct nexthop, rb_node);
3549 if (nh->id < s_idx) {
3550 tmp = tmp->rb_right;
3551 } else {
3552 /* Track current candidate and keep looking on
3553 * the left side to find the left-most
3554 * (smallest id) that is still >= s_idx.
3555 */
3556 node = tmp;
3557 tmp = tmp->rb_left;
3558 }
3559 }
3560 } else {
3561 node = rb_first(root);
3562 }
3563
3564 for (; node; node = rb_next(node)) {
3565 struct nexthop *nh;
3566
3567 nh = rb_entry(node, struct nexthop, rb_node);
3568
3569 ctx->idx = nh->id;
3570 err = nh_cb(skb, cb, nh, data);
3571 if (err)
3572 return err;
3573 }
3574
3575 return 0;
3576 }
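/* Resume example: with nexthop IDs {5, 10, 20} in the tree and
 * ctx->idx == 10 left over from a previous partial dump, the descent
 * above lands on node 10 directly in O(log n) instead of walking past
 * 5 first; the dump then continues with 10 and 20.
 */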
3577
3578 static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
3579 struct nexthop *nh, void *data)
3580 {
3581 struct nhmsg *nhm = nlmsg_data(cb->nlh);
3582 struct nh_dump_filter *filter = data;
3583
3584 if (nh_dump_filtered(nh, filter, nhm->nh_family))
3585 return 0;
3586
3587 return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
3588 NETLINK_CB(cb->skb).portid,
3589 cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
3590 }
3591
3592 /* rtnl */
3593 static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
3594 {
3595 struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
3596 struct net *net = sock_net(skb->sk);
3597 struct rb_root *root = &net->nexthop.rb_root;
3598 struct nh_dump_filter filter = {};
3599 int err;
3600
3601 err = nh_valid_dump_req(cb->nlh, &filter, cb);
3602 if (err < 0)
3603 return err;
3604
3605 err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
3606 &rtm_dump_nexthop_cb, &filter);
3607
3608 cb->seq = net->nexthop.seq;
3609 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3610 return err;
3611 }
3612
3613 static struct nexthop *
3614 nexthop_find_group_resilient(struct net *net, u32 id,
3615 struct netlink_ext_ack *extack)
3616 {
3617 struct nh_group *nhg;
3618 struct nexthop *nh;
3619
3620 nh = nexthop_find_by_id(net, id);
3621 if (!nh)
3622 return ERR_PTR(-ENOENT);
3623
3624 if (!nh->is_group) {
3625 NL_SET_ERR_MSG(extack, "Not a nexthop group");
3626 return ERR_PTR(-EINVAL);
3627 }
3628
3629 nhg = rtnl_dereference(nh->nh_grp);
3630 if (!nhg->resilient) {
3631 NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
3632 return ERR_PTR(-EINVAL);
3633 }
3634
3635 return nh;
3636 }
3637
3638 static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
3639 struct netlink_ext_ack *extack)
3640 {
3641 u32 idx;
3642
3643 if (attr) {
3644 idx = nla_get_u32(attr);
3645 if (!idx) {
3646 NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3647 return -EINVAL;
3648 }
3649 *nh_id_p = idx;
3650 } else {
3651 *nh_id_p = 0;
3652 }
3653
3654 return 0;
3655 }
3656
3657 static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
3658 struct nh_dump_filter *filter,
3659 struct netlink_callback *cb)
3660 {
3661 struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
3662 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
3663 int err;
3664
3665 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3666 ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
3667 rtm_nh_policy_dump_bucket, NULL);
3668 if (err < 0)
3669 return err;
3670
3671 err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
3672 if (err)
3673 return err;
3674
3675 if (tb[NHA_RES_BUCKET]) {
3676 size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;
3677
3678 err = nla_parse_nested(res_tb, max,
3679 tb[NHA_RES_BUCKET],
3680 rtm_nh_res_bucket_policy_dump,
3681 cb->extack);
3682 if (err < 0)
3683 return err;
3684
3685 err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
3686 &filter->res_bucket_nh_id,
3687 cb->extack);
3688 if (err)
3689 return err;
3690 }
3691
3692 return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3693 }
3694
3695 struct rtm_dump_res_bucket_ctx {
3696 struct rtm_dump_nh_ctx nh;
3697 u16 bucket_index;
3698 };
3699
3700 static struct rtm_dump_res_bucket_ctx *
3701 rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
3702 {
3703 struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
3704
3705 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3706 return ctx;
3707 }
3708
3709 struct rtm_dump_nexthop_bucket_data {
3710 struct rtm_dump_res_bucket_ctx *ctx;
3711 struct nh_dump_filter filter;
3712 };
3713
3714 static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
3715 struct netlink_callback *cb,
3716 struct nexthop *nh,
3717 struct rtm_dump_nexthop_bucket_data *dd)
3718 {
3719 u32 portid = NETLINK_CB(cb->skb).portid;
3720 struct nhmsg *nhm = nlmsg_data(cb->nlh);
3721 struct nh_res_table *res_table;
3722 struct nh_group *nhg;
3723 u16 bucket_index;
3724 int err;
3725
3726 nhg = rtnl_dereference(nh->nh_grp);
3727 res_table = rtnl_dereference(nhg->res_table);
3728 for (bucket_index = dd->ctx->bucket_index;
3729 bucket_index < res_table->num_nh_buckets;
3730 bucket_index++) {
3731 struct nh_res_bucket *bucket;
3732 struct nh_grp_entry *nhge;
3733
3734 bucket = &res_table->nh_buckets[bucket_index];
3735 nhge = rtnl_dereference(bucket->nh_entry);
3736 if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
3737 continue;
3738
3739 if (dd->filter.res_bucket_nh_id &&
3740 dd->filter.res_bucket_nh_id != nhge->nh->id)
3741 continue;
3742
3743 dd->ctx->bucket_index = bucket_index;
3744 err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
3745 RTM_NEWNEXTHOPBUCKET, portid,
3746 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3747 cb->extack);
3748 if (err)
3749 return err;
3750 }
3751
3752 dd->ctx->bucket_index = 0;
3753
3754 return 0;
3755 }
3756
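/* Per-nexthop callback for the dump walk: only resilient groups have
 * buckets, so everything else is skipped without error.
 */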
static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh, void *data)
{
	struct rtm_dump_nexthop_bucket_data *dd = data;
	struct nh_group *nhg;

	if (!nh->is_group)
		return 0;

	nhg = rtnl_dereference(nh->nh_grp);
	if (!nhg->resilient)
		return 0;

	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
}

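/* Typically exercised through iproute2; illustrative invocations,
 * assuming a resilient group with ID 10 exists:
 *
 *	ip nexthop bucket list
 *	ip nexthop bucket list id 10
 */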
/* rtnl */
static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
	struct net *net = sock_net(skb->sk);
	struct nexthop *nh;
	int err;

	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
	if (err)
		return err;

	if (dd.filter.nh_id) {
		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
						  cb->extack);
		if (IS_ERR(nh))
			return PTR_ERR(nh);
		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
	} else {
		struct rb_root *root = &net->nexthop.rb_root;

		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
					     &rtm_dump_nexthop_bucket_cb, &dd);
	}

	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}

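/* The nested NHA_RES_BUCKET of a get request must carry the bucket
 * index; a missing index is rejected with an extack message.
 */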
static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
					      u16 *bucket_index,
					      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
	int err;

	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
			       res, rtm_nh_res_bucket_policy_get, extack);
	if (err < 0)
		return err;

	if (!tb[NHA_RES_BUCKET_INDEX]) {
		NL_SET_ERR_MSG(extack, "Bucket index is missing");
		return -EINVAL;
	}

	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
	return 0;
}

static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
				   u32 *id, u16 *bucket_index,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
			  rtm_nh_policy_get_bucket, extack);
	if (err < 0)
		return err;

	err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
	if (err)
		return err;

	if (!tb[NHA_RES_BUCKET]) {
		NL_SET_ERR_MSG(extack, "Bucket information is missing");
		return -EINVAL;
	}

	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
						 bucket_index, extack);
	if (err)
		return err;

	return 0;
}

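/* Fetch a single bucket, e.g. via iproute2 (illustrative; assumes a
 * resilient group with ID 10 exists):
 *
 *	ip nexthop bucket get id 10 index 0
 */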
/* rtnl */
static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nh_res_table *res_table;
	struct sk_buff *skb = NULL;
	struct nh_group *nhg;
	struct nexthop *nh;
	u16 bucket_index;
	int err;
	u32 id;

	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
	if (err)
		return err;

	nh = nexthop_find_group_resilient(net, id, extack);
	if (IS_ERR(nh))
		return PTR_ERR(nh);

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	if (bucket_index >= res_table->num_nh_buckets) {
		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
		return -ENOENT;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
				 bucket_index, RTM_NEWNEXTHOPBUCKET,
				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
				 0, extack);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	kfree_skb(skb);
	return err;
}

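/* A device MTU changed: walk its per-netns device hash chain and let
 * IPv4 nexthops bound to @dev update their cached MTU.
 */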
static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}

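/* React to device state changes: flush nexthops on down/unregister
 * (and on NETDEV_CHANGE when the device is neither running nor
 * lower-up), and resync cached MTUs on NETDEV_CHANGEMTU.
 */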
/* rtnl */
static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGE:
		if (!(netif_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};

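/* Replay all nexthops in the netns to one notifier, in rb-tree (ID)
 * order. Used to bring a newly registered listener in sync, and to
 * tear its state down again on unregistration.
 */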
static int nexthops_dump(struct net *net, struct notifier_block *nb,
			 enum nexthop_event_type event_type,
			 struct netlink_ext_ack *extack)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	int err = 0;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
		if (err)
			break;
	}

	return err;
}

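/* Offload drivers register here to mirror nexthop changes; a minimal
 * sketch (my_nh_event, program_hw and priv are hypothetical, the event
 * types and struct nh_notifier_info come from net/nexthop.h):
 *
 *	static int my_nh_event(struct notifier_block *nb,
 *			       unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		if (event == NEXTHOP_EVENT_REPLACE ||
 *		    event == NEXTHOP_EVENT_DEL)
 *			program_hw(info);
 *		return NOTIFY_DONE;
 *	}
 *
 *	priv->nh_nb.notifier_call = my_nh_event;
 *	err = register_nexthop_notifier(net, &priv->nh_nb, extack);
 *
 * Registration first replays the existing nexthops as REPLACE events
 * under the RTNL, so the listener starts out fully synchronized.
 */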
int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
			      struct netlink_ext_ack *extack)
{
	int err;

	rtnl_lock();
	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
	if (err)
		goto unlock;
	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
					       nb);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_nexthop_notifier);

int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	int err;

	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
						 nb);
	if (!err)
		nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
	return err;
}
EXPORT_SYMBOL(__unregister_nexthop_notifier);

int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_nexthop_notifier(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_nexthop_notifier);

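/* Called by drivers to reflect the hardware state of a nexthop in the
 * RTNH_F_OFFLOAD/RTNH_F_TRAP flags reported to user space.
 */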
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
{
	struct nexthop *nexthop;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop)
		goto out;

	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		nexthop->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		nexthop->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_set_hw_flags);

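/* Same as above, but for a single bucket of a resilient group; an @id
 * that does not name a resilient group is silently ignored.
 */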
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap)
{
	struct nh_res_table *res_table;
	struct nh_res_bucket *bucket;
	struct nexthop *nexthop;
	struct nh_group *nhg;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	if (bucket_index >= nhg->res_table->num_nh_buckets)
		goto out;

	res_table = rcu_dereference(nhg->res_table);
	bucket = &res_table->nh_buckets[bucket_index];
	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		bucket->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		bucket->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);

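/* Drivers periodically report which buckets carried traffic in
 * hardware via an @activity bitmap with one bit per bucket; busy
 * buckets are exempt from idle-timer migration.
 */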
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity)
{
	struct nh_res_table *res_table;
	struct nexthop *nexthop;
	struct nh_group *nhg;
	u16 i;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Instead of silently ignoring some buckets, demand that the sizes
	 * be the same.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (num_buckets != res_table->num_nh_buckets)
		goto out;

	for (i = 0; i < num_buckets; i++) {
		if (test_bit(i, activity))
			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);

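/* Per-netns teardown runs in two steps: nexthops are flushed while the
 * RTNL is still held, and the device hash is freed afterwards.
 */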
static void __net_exit nexthop_net_exit_rtnl(struct net *net,
					     struct list_head *dev_to_kill)
{
	ASSERT_RTNL_NET(net);
	flush_all_nexthops(net);
}

static void __net_exit nexthop_net_exit(struct net *net)
{
	kfree(net->nexthop.devhash);
	net->nexthop.devhash = NULL;
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;
	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
	.exit_rtnl = nexthop_net_exit_rtnl,
};

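/* The handlers are registered for PF_UNSPEC and additionally for
 * PF_INET/PF_INET6, so requests carrying those families resolve to
 * the same doit/dumpit implementations.
 */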
static const struct rtnl_msg_handler nexthop_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWNEXTHOP, .doit = rtm_new_nexthop,
	 .flags = RTNL_FLAG_DOIT_PERNET},
	{.msgtype = RTM_DELNEXTHOP, .doit = rtm_del_nexthop,
	 .flags = RTNL_FLAG_DOIT_PERNET},
	{.msgtype = RTM_GETNEXTHOP, .doit = rtm_get_nexthop,
	 .dumpit = rtm_dump_nexthop},
	{.msgtype = RTM_GETNEXTHOPBUCKET, .doit = rtm_get_nexthop_bucket,
	 .dumpit = rtm_dump_nexthop_bucket},
	{.protocol = PF_INET, .msgtype = RTM_NEWNEXTHOP,
	 .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET},
	{.protocol = PF_INET, .msgtype = RTM_GETNEXTHOP,
	 .dumpit = rtm_dump_nexthop},
	{.protocol = PF_INET6, .msgtype = RTM_NEWNEXTHOP,
	 .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET},
	{.protocol = PF_INET6, .msgtype = RTM_GETNEXTHOP,
	 .dumpit = rtm_dump_nexthop},
};

static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register_many(nexthop_rtnl_msg_handlers);

	return 0;
}
subsys_initcall(nexthop_init);