xref: /linux/net/core/fib_rules.c (revision eb2bce7f5e7ac1ca6da434461217fadf3c688d2c)
/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/fib_rules.h>

static LIST_HEAD(rules_ops);
static DEFINE_SPINLOCK(rules_mod_lock);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

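/*
 * Find the ops registered for a given address family and take a
 * reference on its owning module.  The list walk runs under
 * rcu_read_lock() so a concurrent unregister cannot free the entry
 * underneath us.  Callers drop the module reference via
 * rules_ops_put().
 */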
static struct fib_rules_ops *lookup_rules_ops(int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

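/* Drop the module reference taken by lookup_rules_ops(). */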
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache();
}

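/*
 * Register the rule operations for an address family.  The mandatory
 * callbacks (match, configure, compare, fill, action) are checked up
 * front and only one ops may exist per family; -EEXIST is returned if
 * the family is already taken.
 */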
int fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &rules_ops);
	err = 0;
errout:
	spin_unlock(&rules_mod_lock);

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_register);

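/* Unlink and release every rule still owned by the given ops. */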
static void cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}

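/*
 * Remove the ops from the registry and dispose of its rules.  The
 * final synchronize_rcu() ensures no reader is still traversing the
 * ops list or its rule list once we return.
 */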
int fib_rules_unregister(struct fib_rules_ops *ops)
{
	int err = 0;
	struct fib_rules_ops *o;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list) {
		if (o == ops) {
			list_del_rcu(&o->list);
			cleanup_ops(ops);
			goto out;
		}
	}

	err = -ENOENT;
out:
	spin_unlock(&rules_mod_lock);

	synchronize_rcu();

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_unregister);

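/*
 * Generic part of rule matching: the interface index and the
 * fwmark/mask are compared here, everything else is delegated to the
 * family specific ->match() callback.  The XOR/mask test succeeds when
 * all mark bits covered by mark_mask are equal.  FIB_RULE_INVERT
 * negates the final result.
 */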
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->ifindex && (rule->ifindex != fl->iif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

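/*
 * Walk the rule list under RCU and run the action of the first rule
 * matching the flow.  FR_ACT_GOTO jumps forward to its resolved target
 * rule, FR_ACT_NOP is skipped, and an action returning -EAGAIN means
 * "try the next rule".  On success a reference to the matching rule is
 * handed back through arg->rule; -ESRCH means no rule matched.
 */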
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			fib_rule_get(rule);
			arg->rule = rule;
			goto out;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_lookup);

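/*
 * Sanity check the address attributes: if a source/destination prefix
 * length is given, the corresponding attribute must be present, sized
 * exactly like the family's addresses, and the prefix length may not
 * exceed the address width in bits.
 */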
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

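/*
 * RTM_NEWRULE handler.  Parses and validates the netlink request,
 * allocates the rule, resolves a forward goto target if one is given
 * (backward jumps could create loops and are rejected), lets the
 * family specific ->configure() fill in the rest, and finally inserts
 * the rule into the list ordered by preference.  Older unresolved goto
 * rules pointing at the new preference are resolved as well.
 */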
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IFNAME]) {
		struct net_device *dev;

		rule->ifindex = -1;
		nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(rule->ifname);
		if (dev)
			rule->ifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!rule->pref && ops->default_pref)
		rule->pref = ops->default_pref();

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, ops->rules_list, list) {
			if (r->pref == rule->target) {
				rule->ctarget = r;
				break;
			}
		}

		if (rule->ctarget == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, nlh, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(r->ctarget != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, ops->rules_list);

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

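/*
 * RTM_DELRULE handler.  Every attribute present in the request must
 * match for a rule to be selected; rules marked FIB_RULE_PERMANENT
 * cannot be deleted.  Goto rules that targeted the removed rule have
 * their cached target cleared and are counted as unresolved again.
 */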
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IFNAME] &&
		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check if this rule is the target of any goto rule.
		 * If so, disable those rules.  As this operation is
		 * potentially very expensive, it is only performed if
		 * goto rules have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, ops->rules_list, list) {
				if (tmp->ctarget == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		synchronize_rcu();
		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

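/* Worst case netlink message size for a rule notification. */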
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

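/*
 * Fill a netlink message with a single rule.  The NLA_PUT_*() macros
 * jump to nla_put_failure when the skb runs out of tailroom, in which
 * case the partially built message is cancelled.
 */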
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->ifname[0]) {
		NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);

		if (rule->ifindex == -1)
			frh->flags |= FIB_RULE_DEV_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, nlh, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

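/*
 * Dump all rules of one family.  cb->args[1] remembers how many rules
 * have already been dumped so a subsequent call can continue where the
 * previous skb filled up.  Drops the ops reference taken by the
 * caller.
 */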
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	list_for_each_entry(rule, ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}

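/*
 * RTM_GETRULE dump handler.  A family specific request dumps just that
 * family; AF_UNSPEC iterates over all registered ops, using
 * cb->args[0] to resume at the right family across multiple
 * invocations.
 */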
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
	skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

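/*
 * Broadcast an RTM_NEWRULE/RTM_DELRULE event to the ops' netlink
 * group.  On allocation or fill failure the error is recorded against
 * the group via rtnl_set_sk_err() so listeners notice the lost event.
 */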
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(ops->nlgroup, err);
}

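/*
 * Rules may reference interfaces by name before those interfaces
 * exist; attach_rules()/detach_rules() keep the cached ifindex in sync
 * as devices come and go (ifindex -1 marks a detached rule).
 */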
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->ifindex == -1 &&
		    strcmp(dev->name, rule->ifname) == 0)
			rule->ifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list)
		if (rule->ifindex == dev->ifindex)
			rule->ifindex = -1;
}

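/* Netdevice notifier: fix up rule ifindexes on register/unregister. */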
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct fib_rules_ops *ops;

	ASSERT_RTNL();
	rcu_read_lock();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			attach_rules(ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			detach_rules(ops->rules_list, dev);
		break;
	}

	rcu_read_unlock();

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

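/* Wire up the rtnetlink handlers and the netdevice notifier. */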
static int __init fib_rules_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	return register_netdevice_notifier(&fib_rules_notifier);
}

subsys_initcall(fib_rules_init);