// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/inet_dscp.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <linux/indirect_call_wrapper.h>

#if defined(CONFIG_IPV6) && defined(CONFIG_IPV6_MULTIPLE_TABLES)
#ifdef CONFIG_IP_MULTIPLE_TABLES
#define INDIRECT_CALL_MT(f, f2, f1, ...) \
	INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif
#elif defined(CONFIG_IP_MULTIPLE_TABLES)
#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_MT(f, f2, f1, ...) f(__VA_ARGS__)
#endif

static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};

bool fib_rule_matchall(const struct fib_rule *rule)
{
	if (READ_ONCE(rule->iifindex) || READ_ONCE(rule->oifindex) ||
	    rule->mark || rule->tun_id || rule->flags)
		return false;
	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
		return false;
	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
		return false;
	if (fib_rule_port_range_set(&rule->sport_range))
		return false;
	if (fib_rule_port_range_set(&rule->dport_range))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);

int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT);
	if (r == NULL)
		return -ENOMEM;

	refcount_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->proto = RTPROT_KERNEL;
	r->fr_net = ops->fro_net;
	r->uid_range = fib_kuid_range_unset;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

static struct fib_rules_ops *lookup_rules_ops(const struct net *net,
					      int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}
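
/* Validate and link a new ops structure into the namespace's list.
 * Rejects an undersized rule_size and missing mandatory callbacks,
 * and enforces at most one fib_rules_ops per address family per
 * namespace (-EEXIST otherwise).
 */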
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}

static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}

static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}

static int nla_get_port_range(struct nlattr *pattr,
			      struct fib_rule_port_range *port_range)
{
	const struct fib_rule_port_range *pr = nla_data(pattr);

	if (!fib_rule_port_range_valid(pr))
		return -EINVAL;

	port_range->start = pr->start;
	port_range->end = pr->end;

	return 0;
}

static int nla_put_port_range(struct sk_buff *skb, int attrtype,
			      struct fib_rule_port_range *range)
{
	return nla_put(skb, attrtype, sizeof(*range), range);
}
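
/* Generic per-rule match: check the protocol-independent selectors
 * (iif/oif, fwmark, tunnel id, l3mdev, uid range) before deferring to
 * the family-specific ops->match(). FIB_RULE_INVERT flips the final
 * result.
 */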
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int iifindex, oifindex, ret = 0;

	iifindex = READ_ONCE(rule->iifindex);
	if (iifindex && (iifindex != fl->flowi_iif))
		goto out;

	oifindex = READ_ONCE(rule->oifindex);
	if (oifindex && (oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = INDIRECT_CALL_MT(ops->match,
			       fib6_rule_match,
			       fib4_rule_match,
			       rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = INDIRECT_CALL_MT(ops->action,
					       fib6_rule_action,
					       fib4_rule_action,
					       rule, fl, flags, arg);

		if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
							      fib6_rule_suppress,
							      fib4_rule_suppress,
							      rule, flags, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(refcount_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int call_fib_rule_notifier(struct notifier_block *nb,
				  enum fib_event_type event_type,
				  struct fib_rule *rule, int family,
				  struct netlink_ext_ack *extack)
{
	struct fib_rule_notifier_info info = {
		.info.family = family,
		.info.extack = extack,
		.rule = rule,
	};

	return call_fib_notifier(nb, event_type, &info.info);
}

static int call_fib_rule_notifiers(struct net *net,
				   enum fib_event_type event_type,
				   struct fib_rule *rule,
				   struct fib_rules_ops *ops,
				   struct netlink_ext_ack *extack)
{
	struct fib_rule_notifier_info info = {
		.info.family = ops->family,
		.info.extack = extack,
		.rule = rule,
	};

	ASSERT_RTNL_NET(net);

	/* Paired with READ_ONCE() in fib_rules_seq_read() */
	WRITE_ONCE(ops->fib_rules_seq, ops->fib_rules_seq + 1);
	return call_fib_notifiers(net, event_type, &info.info);
}

/* Called with rcu_read_lock() */
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
		   struct netlink_ext_ack *extack)
{
	struct fib_rules_ops *ops;
	struct fib_rule *rule;
	int err = 0;

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return -EAFNOSUPPORT;
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		err = call_fib_rule_notifier(nb, FIB_EVENT_RULE_ADD,
					     rule, family, extack);
		if (err)
			break;
	}
	rules_ops_put(ops);

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_dump);
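
/* Read the per-family rule sequence counter, which listeners use to
 * detect rule changes that raced with a dump. Returns 0 when the
 * family has no registered fib_rules_ops.
 */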
unsigned int fib_rules_seq_read(const struct net *net, int family)
{
	unsigned int fib_rules_seq;
	struct fib_rules_ops *ops;

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return 0;
	/* Paired with WRITE_ONCE() in call_fib_rule_notifiers() */
	fib_rules_seq = READ_ONCE(ops->fib_rules_seq);
	rules_ops_put(ops);

	return fib_rules_seq;
}
EXPORT_SYMBOL_GPL(fib_rules_seq_read);

static struct fib_rule *rule_find(struct fib_rules_ops *ops,
				  struct fib_rule_hdr *frh,
				  struct nlattr **tb,
				  struct fib_rule *rule,
				  bool user_priority)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (rule->action && r->action != rule->action)
			continue;

		if (rule->table && r->table != rule->table)
			continue;

		if (user_priority && r->pref != rule->pref)
			continue;

		if (rule->iifname[0] &&
		    memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (rule->oifname[0] &&
		    memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (rule->mark && r->mark != rule->mark)
			continue;

		if (rule->suppress_ifgroup != -1 &&
		    r->suppress_ifgroup != rule->suppress_ifgroup)
			continue;

		if (rule->suppress_prefixlen != -1 &&
		    r->suppress_prefixlen != rule->suppress_prefixlen)
			continue;

		if (rule->mark_mask && r->mark_mask != rule->mark_mask)
			continue;

		if (rule->tun_id && r->tun_id != rule->tun_id)
			continue;

		if (rule->l3mdev && r->l3mdev != rule->l3mdev)
			continue;

		if (uid_range_set(&rule->uid_range) &&
		    (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		     !uid_eq(r->uid_range.end, rule->uid_range.end)))
			continue;

		if (rule->ip_proto && r->ip_proto != rule->ip_proto)
			continue;

		if (rule->proto && r->proto != rule->proto)
			continue;

		if (fib_rule_port_range_set(&rule->sport_range) &&
		    !fib_rule_port_range_compare(&r->sport_range,
						 &rule->sport_range))
			continue;

		if (fib_rule_port_range_set(&rule->dport_range) &&
		    !fib_rule_port_range_compare(&r->dport_range,
						 &rule->dport_range))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return r;
	}

	return NULL;
}

#ifdef CONFIG_NET_L3_MASTER_DEV
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	nlrule->l3mdev = nla_get_u8(nla);
	if (nlrule->l3mdev != 1) {
		NL_SET_ERR_MSG(extack, "Invalid l3mdev attribute");
		return -1;
	}

	return 0;
}
#else
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack, "l3mdev support is not enabled in kernel");
	return -1;
}
#endif
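
/* Parse the netlink attributes of an RTM_NEWRULE/RTM_DELRULE request
 * into a freshly allocated fib_rule. Only validation that does not
 * need the RTNL lock happens here; device lookups and default
 * preference assignment are deferred to fib_nl2rule_rtnl().
 */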
static int fib_nl2rule(struct net *net, struct nlmsghdr *nlh,
		       struct netlink_ext_ack *extack,
		       struct fib_rules_ops *ops,
		       struct nlattr *tb[],
		       struct fib_rule **rule,
		       bool *user_priority)
{
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rule *nlrule = NULL;
	int err = -EINVAL;

	if (frh->src_len)
		if (!tb[FRA_SRC] ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			goto errout;
		}

	if (frh->dst_len)
		if (!tb[FRA_DST] ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size) {
			NL_SET_ERR_MSG(extack, "Invalid dst address");
			goto errout;
		}

	nlrule = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT);
	if (!nlrule) {
		err = -ENOMEM;
		goto errout;
	}
	refcount_set(&nlrule->refcnt, 1);
	nlrule->fr_net = net;

	if (tb[FRA_PRIORITY]) {
		nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]);
		*user_priority = true;
	}

	nlrule->proto = nla_get_u8_default(tb[FRA_PROTOCOL], RTPROT_UNSPEC);

	if (tb[FRA_IIFNAME]) {
		nlrule->iifindex = -1;
		nla_strscpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
	}

	if (tb[FRA_OIFNAME]) {
		nlrule->oifindex = -1;
		nla_strscpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
	}

	if (tb[FRA_FWMARK]) {
		nlrule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (nlrule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			nlrule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		nlrule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		nlrule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	if (tb[FRA_L3MDEV] &&
	    fib_nl2rule_l3mdev(tb[FRA_L3MDEV], nlrule, extack) < 0)
		goto errout_free;

	nlrule->action = frh->action;
	nlrule->flags = frh->flags;
	nlrule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		nlrule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		nlrule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		nlrule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		nlrule->suppress_ifgroup = -1;

	if (tb[FRA_GOTO]) {
		if (nlrule->action != FR_ACT_GOTO) {
			NL_SET_ERR_MSG(extack, "Unexpected goto");
			goto errout_free;
		}

		nlrule->target = nla_get_u32(tb[FRA_GOTO]);
	} else if (nlrule->action == FR_ACT_GOTO) {
		NL_SET_ERR_MSG(extack, "Missing goto target for action goto");
		goto errout_free;
	}

	if (nlrule->l3mdev && nlrule->table) {
		NL_SET_ERR_MSG(extack, "l3mdev and table are mutually exclusive");
		goto errout_free;
	}

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			NL_SET_ERR_MSG(extack, "No permission to set uid");
			goto errout_free;
		}

		nlrule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&nlrule->uid_range) ||
		    !uid_lte(nlrule->uid_range.start, nlrule->uid_range.end)) {
			NL_SET_ERR_MSG(extack, "Invalid uid range");
			goto errout_free;
		}
	} else {
		nlrule->uid_range = fib_kuid_range_unset;
	}

	if (tb[FRA_IP_PROTO])
		nlrule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);

	if (tb[FRA_SPORT_RANGE]) {
		err = nla_get_port_range(tb[FRA_SPORT_RANGE],
					 &nlrule->sport_range);
		if (err) {
			NL_SET_ERR_MSG(extack, "Invalid sport range");
			goto errout_free;
		}
	}

	if (tb[FRA_DPORT_RANGE]) {
		err = nla_get_port_range(tb[FRA_DPORT_RANGE],
					 &nlrule->dport_range);
		if (err) {
			NL_SET_ERR_MSG(extack, "Invalid dport range");
			goto errout_free;
		}
	}

	*rule = nlrule;

	return 0;

errout_free:
	kfree(nlrule);
errout:
	return err;
}
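
/* Second-stage conversion that must run under RTNL: pick a default
 * preference when the user supplied none, reject backward gotos and
 * resolve iif/oif names to ifindexes.
 */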
static int fib_nl2rule_rtnl(struct fib_rule *nlrule,
			    struct fib_rules_ops *ops,
			    struct nlattr *tb[],
			    struct netlink_ext_ack *extack)
{
	if (!tb[FRA_PRIORITY])
		nlrule->pref = fib_default_rule_pref(ops);

	/* Backward jumps are prohibited to avoid endless loops */
	if (tb[FRA_GOTO] && nlrule->target <= nlrule->pref) {
		NL_SET_ERR_MSG(extack, "Backward goto not supported");
		return -EINVAL;
	}

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		dev = __dev_get_by_name(nlrule->fr_net, nlrule->iifname);
		if (dev)
			nlrule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		dev = __dev_get_by_name(nlrule->fr_net, nlrule->oifname);
		if (dev)
			nlrule->oifindex = dev->ifindex;
	}

	return 0;
}

static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
		       struct nlattr **tb, struct fib_rule *rule)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->action != rule->action)
			continue;

		if (r->table != rule->table)
			continue;

		if (r->pref != rule->pref)
			continue;

		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (r->mark != rule->mark)
			continue;

		if (r->suppress_ifgroup != rule->suppress_ifgroup)
			continue;

		if (r->suppress_prefixlen != rule->suppress_prefixlen)
			continue;

		if (r->mark_mask != rule->mark_mask)
			continue;

		if (r->tun_id != rule->tun_id)
			continue;

		if (r->l3mdev != rule->l3mdev)
			continue;

		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		    !uid_eq(r->uid_range.end, rule->uid_range.end))
			continue;

		if (r->ip_proto != rule->ip_proto)
			continue;

		if (r->proto != rule->proto)
			continue;

		if (!fib_rule_port_range_compare(&r->sport_range,
						 &rule->sport_range))
			continue;

		if (!fib_rule_port_range_compare(&r->dport_range,
						 &rule->dport_range))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return 1;
	}
	return 0;
}

static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = {
	[FRA_UNSPEC]	= { .strict_start_type = FRA_DPORT_RANGE + 1 },
	[FRA_IIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[FRA_OIFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[FRA_PRIORITY]	= { .type = NLA_U32 },
	[FRA_FWMARK]	= { .type = NLA_U32 },
	[FRA_FLOW]	= { .type = NLA_U32 },
	[FRA_TUN_ID]	= { .type = NLA_U64 },
	[FRA_FWMASK]	= { .type = NLA_U32 },
	[FRA_TABLE]	= { .type = NLA_U32 },
	[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 },
	[FRA_SUPPRESS_IFGROUP]	= { .type = NLA_U32 },
	[FRA_GOTO]	= { .type = NLA_U32 },
	[FRA_L3MDEV]	= { .type = NLA_U8 },
	[FRA_UID_RANGE]	= { .len = sizeof(struct fib_rule_uid_range) },
	[FRA_PROTOCOL]	= { .type = NLA_U8 },
	[FRA_IP_PROTO]	= { .type = NLA_U8 },
	[FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
	[FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
	[FRA_DSCP]	= NLA_POLICY_MAX(NLA_U8, INET_DSCP_MASK >> 2),
	[FRA_FLOWLABEL]	= { .type = NLA_BE32 },
	[FRA_FLOWLABEL_MASK] = { .type = NLA_BE32 },
};
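
/* Add a new rule, keeping ops->rules_list sorted by preference and
 * resolving goto targets in both directions: the new rule's ctarget,
 * and any previously unresolved goto rules that point at the new
 * rule's preference.
 */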
int fib_newrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh,
		struct netlink_ext_ack *extack, bool rtnl_held)
{
	struct fib_rule *rule = NULL, *r, *last = NULL;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	int err = -EINVAL, unresolved = 0;
	struct fib_rules_ops *ops = NULL;
	struct nlattr *tb[FRA_MAX + 1];
	bool user_priority = false;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid msg length");
		goto errout;
	}

	ops = lookup_rules_ops(net, frh->family);
	if (!ops) {
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "Rule family not supported");
		goto errout;
	}

	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
				     fib_rule_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Error parsing msg");
		goto errout;
	}

	err = fib_nl2rule(net, nlh, extack, ops, tb, &rule, &user_priority);
	if (err)
		goto errout;

	if (!rtnl_held)
		rtnl_net_lock(net);

	err = fib_nl2rule_rtnl(rule, ops, tb, extack);
	if (err)
		goto errout_free;

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_exists(ops, frh, tb, rule)) {
		err = -EEXIST;
		goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb, extack);
	if (err < 0)
		goto errout_free;

	err = call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops,
				      extack);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref == rule->target) {
			RCU_INIT_POINTER(rule->ctarget, r);
			break;
		}
	}

	if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
		unresolved = 1;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	fib_rule_get(rule);

	if (!rtnl_held)
		rtnl_net_unlock(net);

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	fib_rule_put(rule);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	if (!rtnl_held)
		rtnl_net_unlock(net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_newrule);

static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	return fib_newrule(sock_net(skb->sk), skb, nlh, extack, false);
}
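
/* Delete the rule matching the request, unless it is marked
 * FIB_RULE_PERMANENT. Goto rules targeting the deleted rule are
 * re-pointed to the next rule of the same preference or become
 * unresolved.
 */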
int fib_delrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh,
		struct netlink_ext_ack *extack, bool rtnl_held)
{
	struct fib_rule *rule = NULL, *nlrule = NULL;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct nlattr *tb[FRA_MAX + 1];
	bool user_priority = false;
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid msg length");
		goto errout;
	}

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "Rule family not supported");
		goto errout;
	}

	err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
				     fib_rule_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Error parsing msg");
		goto errout;
	}

	err = fib_nl2rule(net, nlh, extack, ops, tb, &nlrule, &user_priority);
	if (err)
		goto errout;

	if (!rtnl_held)
		rtnl_net_lock(net);

	err = fib_nl2rule_rtnl(nlrule, ops, tb, extack);
	if (err)
		goto errout_free;

	rule = rule_find(ops, frh, tb, nlrule, user_priority);
	if (!rule) {
		err = -ENOENT;
		goto errout_free;
	}

	if (rule->flags & FIB_RULE_PERMANENT) {
		err = -EPERM;
		goto errout_free;
	}

	if (ops->delete) {
		err = ops->delete(rule);
		if (err)
			goto errout_free;
	}

	if (rule->tun_id)
		ip_tunnel_unneed_metadata();

	list_del_rcu(&rule->list);

	if (rule->action == FR_ACT_GOTO) {
		ops->nr_goto_rules--;
		if (rtnl_dereference(rule->ctarget) == NULL)
			ops->unresolved_rules--;
	}

	/*
	 * Check if this rule is the target of any goto rules. If so,
	 * repoint them to the next rule with the same preference, or
	 * mark them unresolved. As this operation can be expensive, it
	 * is only performed when goto rules other than the one being
	 * deleted have actually been added.
	 */
	if (ops->nr_goto_rules > 0) {
		struct fib_rule *n, *r;

		n = list_next_entry(rule, list);
		if (&n->list == &ops->rules_list || n->pref != rule->pref)
			n = NULL;
		list_for_each_entry(r, &ops->rules_list, list) {
			if (rtnl_dereference(r->ctarget) != rule)
				continue;
			rcu_assign_pointer(r->ctarget, n);
			if (!n)
				ops->unresolved_rules++;
		}
	}

	call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops, NULL);

	if (!rtnl_held)
		rtnl_net_unlock(net);

	notify_rule_change(RTM_DELRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	fib_rule_put(rule);
	flush_route_cache(ops);
	rules_ops_put(ops);
	kfree(nlrule);
	return 0;

errout_free:
	if (!rtnl_held)
		rtnl_net_unlock(net);
	kfree(nlrule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_delrule);

static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	return fib_delrule(sock_net(skb->sk), skb, nlh, extack, false);
}

static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
			 + nla_total_size(sizeof(struct fib_kuid_range))
			 + nla_total_size(1) /* FRA_PROTOCOL */
			 + nla_total_size(1) /* FRA_IP_PROTO */
			 + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_SPORT_RANGE */
			 + nla_total_size(sizeof(struct fib_rule_port_range)); /* FRA_DPORT_RANGE */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
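
/* Fill a netlink message describing @rule. Table ids that do not fit
 * the 8-bit header field are reported there as RT_TABLE_COMPAT; the
 * real id is always carried in FRA_TABLE.
 */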
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (nla_put_u8(skb, FRA_PROTOCOL, rule->proto))
		goto nla_put_failure;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (READ_ONCE(rule->iifindex) == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (READ_ONCE(rule->oifindex) == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)) ||
	    (fib_rule_port_range_set(&rule->sport_range) &&
	     nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) ||
	    (fib_rule_port_range_set(&rule->dport_range) &&
	     nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) ||
	    (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}
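
/* Strict-check validation of RTM_GETRULE dump requests: the header
 * must be present with all fields other than the family zeroed, and
 * no attributes may follow it.
 */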
static int fib_valid_dumprule_req(const struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct fib_rule_hdr *frh;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid header for fib rule dump request");
		return -EINVAL;
	}

	frh = nlmsg_data(nlh);
	if (frh->dst_len || frh->src_len || frh->tos || frh->table ||
	    frh->res1 || frh->res2 || frh->action || frh->flags) {
		NL_SET_ERR_MSG(extack,
			       "Invalid values in header for fib rule dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*frh))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in fib rule dump request");
		return -EINVAL;
	}

	return 0;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int err, idx = 0, family;

	if (cb->strict_check) {
		err = fib_valid_dumprule_req(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = rtnl_msg_family(nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	err = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		err = dump_rules(skb, cb, ops);
		if (err < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return err;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOMEM;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(net, ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			WRITE_ONCE(rule->iifindex, dev->ifindex);
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			WRITE_ONCE(rule->oifindex, dev->ifindex);
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			WRITE_ONCE(rule->iifindex, -1);
		if (rule->oifindex == dev->ifindex)
			WRITE_ONCE(rule->oifindex, -1);
	}
}

static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
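
/* Pernet setup is limited to initialising the per-namespace list of
 * fib_rules_ops and its guard lock; by the time a namespace exits,
 * every family must already have unregistered its ops.
 */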
static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static void __net_exit fib_rules_net_exit(struct net *net)
{
	WARN_ON_ONCE(!list_empty(&net->rules_ops));
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
	.exit = fib_rules_net_exit,
};

static const struct rtnl_msg_handler fib_rules_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWRULE, .doit = fib_nl_newrule,
	 .flags = RTNL_FLAG_DOIT_PERNET},
	{.msgtype = RTM_DELRULE, .doit = fib_nl_delrule,
	 .flags = RTNL_FLAG_DOIT_PERNET},
	{.msgtype = RTM_GETRULE, .dumpit = fib_nl_dumprule,
	 .flags = RTNL_FLAG_DUMP_UNLOCKED},
};

static int __init fib_rules_init(void)
{
	int err;

	rtnl_register_many(fib_rules_rtnl_msg_handlers);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister_many(fib_rules_rtnl_msg_handlers);
	return err;
}

subsys_initcall(fib_rules_init);