/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = hold_net(ops->fro_net);

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	hold_net(net);
	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

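	/* Unlink each rule from the list and drop the list's reference to it. */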
	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}

static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->iif) &&
	    !(fl->flags & FLOWI_FLAG_MATCH_ANY_IIF))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->oif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);

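	/* Fill in the new rule from the optional netlink attributes. */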
	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(rtnl_dereference(r->ctarget) != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check whether this rule is the target of any goto rule.
		 * If so, mark those references unresolved. As this operation
		 * is potentially very expensive, it is only performed if
		 * goto rules have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_dereference_raw(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);

		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);

		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
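	/* Unspecified family: walk every registered ops, resuming at the
	 * family index saved in cb->args[0].
	 */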
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);