/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

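/*
 * fib_default_rule_add - install an initial rule for a rules family
 *
 * Allocates a rule that jumps to @table with the given preference and
 * flags and appends it to the ops' rule list.  Meant to be called while
 * a protocol sets up its default rules (e.g. local/main/default for
 * IPv4), before the list is visible to any other context, hence no
 * locking.
 */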
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = hold_net(ops->fro_net);

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

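/*
 * fib_default_rule_pref - suggest a preference for a new rule
 *
 * Used when userspace did not supply FRA_PRIORITY: returns one less
 * than the preference of the second rule in the list, so new rules
 * stack downwards from there (e.g. 32765, 32764, ... with the default
 * IPv4 setup).  Returns 0 if no such slot can be derived.
 */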
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

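/*
 * lookup_rules_ops - find the fib_rules_ops for an address family
 *
 * Walks the per-namespace ops list under RCU and takes a reference on
 * the owning module; the caller must drop it via rules_ops_put().
 */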
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	hold_net(net);
	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

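/*
 * fib_rules_register - register a family's FIB rules operations
 *
 * Duplicates @tmpl (which may therefore live in initconst data), hooks
 * it up to @net and makes it visible to rule lookups.  Returns the new
 * ops or an ERR_PTR() on failure.
 *
 * A protocol typically calls this from its pernet init, e.g. (sketch;
 * the template name is illustrative):
 *
 *	ops = fib_rules_register(&fib4_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 */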
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}

static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}

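/*
 * fib_rules_unregister - tear down ops set up by fib_rules_register
 *
 * Unlinks the ops and drops all their rules; the ops themselves are
 * freed only after an RCU grace period so that concurrent lookups
 * remain safe.
 */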
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

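/*
 * fib_rule_match - check a rule against a flow
 *
 * Applies the generic selectors (iif, oif, fwmark) before handing off
 * to the family specific ->match(), and honours FIB_RULE_INVERT by
 * negating the combined result.
 */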
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

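/*
 * fib_rules_lookup - run a flow through the rule list
 *
 * Rules are walked in preference order under RCU.  A matching goto rule
 * redirects the walk to its pre-resolved target, NOP rules are skipped,
 * and any other action is handed to ->action().  An action returning
 * -EAGAIN continues the walk; any other result terminates it and the
 * matched rule is returned in @arg->rule, with a reference held unless
 * the caller passed FIB_LOOKUP_NOREF.  Returns -ESRCH if no rule gives
 * a final result.
 */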
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

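/*
 * validate_rulemsg - sanity check the src/dst selectors of a request
 *
 * If a prefix length is given, the corresponding attribute must be
 * present, carry exactly ops->addr_size bytes, and the prefix length
 * must fit within that address size (in bits).
 */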
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

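/*
 * fib_nl_newrule - RTM_NEWRULE handler
 *
 * Parses the netlink request, builds a rule, resolves or records
 * pending goto targets, lets the family specific ->configure() fill in
 * the rest and inserts the rule in preference order.
 */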
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

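/*
 * fib_nl_delrule - RTM_DELRULE handler
 *
 * Finds the first rule matching all selectors given in the request,
 * unlinks it and invalidates any goto rules that pointed at it.
 */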
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check whether the deleted rule was the target of any
		 * goto rules.  If so, mark those rules unresolved.  As
		 * this scan can be expensive, it is only performed when
		 * goto rules have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

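/*
 * fib_nl_fill_rule - encode a rule into an RTM_{NEW,DEL}RULE message
 *
 * Fills the fib_rule_hdr and the generic attributes, then lets the
 * family specific ->fill() append its own.
 */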
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);

		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);

		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}

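/*
 * fib_nl_dumprule - RTM_GETRULE dump handler
 *
 * Dumps a single family if one was requested, otherwise iterates over
 * all registered families, using cb->args[0]/[1] to resume across
 * multiple dump callbacks.
 */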
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

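/*
 * notify_rule_change - multicast a rule change via rtnetlink
 *
 * Failure to allocate or fill the message is reported to listeners
 * through rtnl_set_sk_err() rather than propagated to the caller.
 */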
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

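/*
 * attach_rules()/detach_rules() rebind rules that reference a device
 * by name when it (re)appears, and mark them detached (ifindex -1)
 * when it goes away, so that a rule can outlive its interface.
 */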
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

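/*
 * fib_rules_event - netdevice notifier
 *
 * Keeps the iif/oif bindings of all rules in the device's namespace in
 * sync as devices register and unregister.  Runs under the RTNL lock.
 */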
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);