xref: /linux/net/ipv4/fib_rules.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		IPv4 Forwarding Information Base: policy rules.
7  *
8  * Version:	$Id: fib_rules.c,v 1.17 2001/10/31 21:55:54 davem Exp $
9  *
10  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11  *
12  *		This program is free software; you can redistribute it and/or
13  *		modify it under the terms of the GNU General Public License
14  *		as published by the Free Software Foundation; either version
15  *		2 of the License, or (at your option) any later version.
16  *
17  * Fixes:
18  * 		Rani Assaf	:	local_rule cannot be deleted
19  *		Marc Boucher	:	routing by fwmark
20  */
21 
22 #include <asm/uaccess.h>
23 #include <asm/system.h>
24 #include <linux/bitops.h>
25 #include <linux/types.h>
26 #include <linux/kernel.h>
27 #include <linux/sched.h>
28 #include <linux/mm.h>
29 #include <linux/string.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/errno.h>
33 #include <linux/in.h>
34 #include <linux/inet.h>
35 #include <linux/inetdevice.h>
36 #include <linux/netdevice.h>
37 #include <linux/if_arp.h>
38 #include <linux/proc_fs.h>
39 #include <linux/skbuff.h>
40 #include <linux/netlink.h>
41 #include <linux/init.h>
42 #include <linux/list.h>
43 #include <linux/rcupdate.h>
44 
45 #include <net/ip.h>
46 #include <net/protocol.h>
47 #include <net/route.h>
48 #include <net/tcp.h>
49 #include <net/sock.h>
50 #include <net/ip_fib.h>
51 
52 #define FRprintk(a...)
53 
/* A single IPv4 policy routing rule.  Rules live on the global
 * fib_rules hlist, kept sorted by ascending r_preference, and are
 * traversed under RCU by fib_lookup(). */
struct fib_rule
{
	struct hlist_node hlist;	/* link in the global fib_rules list */
	atomic_t	r_clntref;	/* refcount; freed via RCU when it hits 0 */
	u32		r_preference;	/* priority; list is kept sorted by this */
	unsigned char	r_table;	/* routing table id this rule selects */
	unsigned char	r_action;	/* RTN_UNICAST, RTN_UNREACHABLE, ... */
	unsigned char	r_dst_len;	/* destination prefix length in bits */
	unsigned char	r_src_len;	/* source prefix length in bits */
	u32		r_src;		/* source prefix to match */
	u32		r_srcmask;	/* mask derived from r_src_len */
	u32		r_dst;		/* destination prefix to match */
	u32		r_dstmask;	/* mask derived from r_dst_len */
	u32		r_srcmap;	/* source map (RTA_GATEWAY attribute) */
	u8		r_flags;	/* rtm_flags as supplied by userspace */
	u8		r_tos;		/* TOS to match; 0 = wildcard */
#ifdef CONFIG_IP_ROUTE_FWMARK
	u32		r_fwmark;	/* fwmark to match; 0 = wildcard */
#endif
	int		r_ifindex;	/* input device; -1 = named dev absent */
#ifdef CONFIG_NET_CLS_ROUTE
	__u32		r_tclassid;	/* traffic-class id (RTA_FLOW) */
#endif
	char		r_ifname[IFNAMSIZ];	/* input device name, if any */
	int		r_dead;		/* set when unlinked; gates RCU free */
	struct		rcu_head rcu;	/* deferred-free callback head */
};
81 
/* Built-in rules, consulted in priority order: local (preference 0)
 * -> main (0x7FFE) -> default (0x7FFF).  r_clntref starts at 2 so
 * these statically allocated rules can never reach a zero refcount
 * and be freed. */
static struct fib_rule default_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7FFF,
	.r_table =	RT_TABLE_DEFAULT,
	.r_action =	RTN_UNICAST,
};

static struct fib_rule main_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7FFE,
	.r_table =	RT_TABLE_MAIN,
	.r_action =	RTN_UNICAST,
};

/* Highest priority: r_preference is left zero-initialized. */
static struct fib_rule local_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_table =	RT_TABLE_LOCAL,
	.r_action =	RTN_UNICAST,
};

/* Global rule list, sorted by ascending r_preference; read under RCU,
 * modified only with the RTNL semaphore held. */
static struct hlist_head fib_rules;
103 
/* Writer function, called from rtnetlink with the RTNL semaphore held. */
106 static void rtmsg_rule(int, struct fib_rule *);
107 
108 int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
109 {
110 	struct rtattr **rta = arg;
111 	struct rtmsg *rtm = NLMSG_DATA(nlh);
112 	struct fib_rule *r;
113 	struct hlist_node *node;
114 	int err = -ESRCH;
115 
116 	hlist_for_each_entry(r, node, &fib_rules, hlist) {
117 		if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 4) == 0) &&
118 		    rtm->rtm_src_len == r->r_src_len &&
119 		    rtm->rtm_dst_len == r->r_dst_len &&
120 		    (!rta[RTA_DST-1] || memcmp(RTA_DATA(rta[RTA_DST-1]), &r->r_dst, 4) == 0) &&
121 		    rtm->rtm_tos == r->r_tos &&
122 #ifdef CONFIG_IP_ROUTE_FWMARK
123 		    (!rta[RTA_PROTOINFO-1] || memcmp(RTA_DATA(rta[RTA_PROTOINFO-1]), &r->r_fwmark, 4) == 0) &&
124 #endif
125 		    (!rtm->rtm_type || rtm->rtm_type == r->r_action) &&
126 		    (!rta[RTA_PRIORITY-1] || memcmp(RTA_DATA(rta[RTA_PRIORITY-1]), &r->r_preference, 4) == 0) &&
127 		    (!rta[RTA_IIF-1] || rtattr_strcmp(rta[RTA_IIF-1], r->r_ifname) == 0) &&
128 		    (!rtm->rtm_table || (r && rtm->rtm_table == r->r_table))) {
129 			err = -EPERM;
130 			if (r == &local_rule)
131 				break;
132 
133 			hlist_del_rcu(&r->hlist);
134 			r->r_dead = 1;
135 			rtmsg_rule(RTM_DELRULE, r);
136 			fib_rule_put(r);
137 			err = 0;
138 			break;
139 		}
140 	}
141 	return err;
142 }
143 
144 /* Allocate new unique table id */
145 
146 static struct fib_table *fib_empty_table(void)
147 {
148 	int id;
149 
150 	for (id = 1; id <= RT_TABLE_MAX; id++)
151 		if (fib_tables[id] == NULL)
152 			return __fib_new_table(id);
153 	return NULL;
154 }
155 
156 static inline void fib_rule_put_rcu(struct rcu_head *head)
157 {
158 	struct fib_rule *r = container_of(head, struct fib_rule, rcu);
159 	kfree(r);
160 }
161 
162 void fib_rule_put(struct fib_rule *r)
163 {
164 	if (atomic_dec_and_test(&r->r_clntref)) {
165 		if (r->r_dead)
166 			call_rcu(&r->rcu, fib_rule_put_rcu);
167 		else
168 			printk("Freeing alive rule %p\n", r);
169 	}
170 }
171 
/* Writer function, called from rtnetlink with the RTNL semaphore held. */
/* Insert a new policy rule from an RTM_NEWRULE request.  Called from
 * rtnetlink with the RTNL semaphore held, which serializes writers;
 * list insertion uses the RCU-safe variants for the benefit of
 * concurrent fib_lookup() readers.
 *
 * Returns 0 on success or a negative errno.
 */
int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct fib_rule *r, *new_r, *last = NULL;
	struct hlist_node *node = NULL;
	unsigned char table_id;

	/* Prefix lengths are IPv4 bit counts; TOS must fit the TOS mask. */
	if (rtm->rtm_src_len > 32 || rtm->rtm_dst_len > 32 ||
	    (rtm->rtm_tos & ~IPTOS_TOS_MASK))
		return -EINVAL;

	if (rta[RTA_IIF-1] && RTA_PAYLOAD(rta[RTA_IIF-1]) > IFNAMSIZ)
		return -EINVAL;

	table_id = rtm->rtm_table;
	if (table_id == RT_TABLE_UNSPEC) {
		struct fib_table *table;
		/* No table given: allocate a fresh one for unicast rules. */
		if (rtm->rtm_type == RTN_UNICAST) {
			if ((table = fib_empty_table()) == NULL)
				return -ENOBUFS;
			table_id = table->tb_id;
		}
	}

	new_r = kmalloc(sizeof(*new_r), GFP_KERNEL);
	if (!new_r)
		return -ENOMEM;
	memset(new_r, 0, sizeof(*new_r));

	/* Copy the match selectors supplied by userspace.  NOTE(review):
	 * 4-byte attribute payloads are assumed, not validated here —
	 * presumably checked by the rtnetlink layer; confirm. */
	if (rta[RTA_SRC-1])
		memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
	if (rta[RTA_DST-1])
		memcpy(&new_r->r_dst, RTA_DATA(rta[RTA_DST-1]), 4);
	if (rta[RTA_GATEWAY-1])
		memcpy(&new_r->r_srcmap, RTA_DATA(rta[RTA_GATEWAY-1]), 4);
	new_r->r_src_len = rtm->rtm_src_len;
	new_r->r_dst_len = rtm->rtm_dst_len;
	new_r->r_srcmask = inet_make_mask(rtm->rtm_src_len);
	new_r->r_dstmask = inet_make_mask(rtm->rtm_dst_len);
	new_r->r_tos = rtm->rtm_tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	if (rta[RTA_PROTOINFO-1])
		memcpy(&new_r->r_fwmark, RTA_DATA(rta[RTA_PROTOINFO-1]), 4);
#endif
	new_r->r_action = rtm->rtm_type;
	new_r->r_flags = rtm->rtm_flags;
	if (rta[RTA_PRIORITY-1])
		memcpy(&new_r->r_preference, RTA_DATA(rta[RTA_PRIORITY-1]), 4);
	new_r->r_table = table_id;
	if (rta[RTA_IIF-1]) {
		struct net_device *dev;
		rtattr_strlcpy(new_r->r_ifname, rta[RTA_IIF-1], IFNAMSIZ);
		/* -1 marks "named device not present"; fixed up later by
		 * fib_rules_attach() when the device registers. */
		new_r->r_ifindex = -1;
		dev = __dev_get_by_name(new_r->r_ifname);
		if (dev)
			new_r->r_ifindex = dev->ifindex;
	}
#ifdef CONFIG_NET_CLS_ROUTE
	if (rta[RTA_FLOW-1])
		memcpy(&new_r->r_tclassid, RTA_DATA(rta[RTA_FLOW-1]), 4);
#endif
	/* The list always contains at least local_rule (installed in
	 * fib_rules_init), so fib_rules.first is never NULL here. */
	r = container_of(fib_rules.first, struct fib_rule, hlist);

	/* No explicit priority given: place the rule just ahead of the
	 * current second entry by using that entry's preference minus 1. */
	if (!new_r->r_preference) {
		if (r && r->hlist.next != NULL) {
			r = container_of(r->hlist.next, struct fib_rule, hlist);
			if (r->r_preference)
				new_r->r_preference = r->r_preference - 1;
		}
	}

	/* Find the insertion point, keeping the list sorted by
	 * ascending preference; 'last' ends up as the predecessor. */
	hlist_for_each_entry(r, node, &fib_rules, hlist) {
		if (r->r_preference > new_r->r_preference)
			break;
		last = r;
	}
	atomic_inc(&new_r->r_clntref);

	if (last)
		hlist_add_after_rcu(&last->hlist, &new_r->hlist);
	else
		/* No predecessor: the loop broke on the very first entry,
		 * so 'r' is still valid and becomes our successor. */
		hlist_add_before_rcu(&new_r->hlist, &r->hlist);

	rtmsg_rule(RTM_NEWRULE, new_r);
	return 0;
}
261 
262 #ifdef CONFIG_NET_CLS_ROUTE
263 u32 fib_rules_tclass(struct fib_result *res)
264 {
265 	if (res->r)
266 		return res->r->r_tclassid;
267 	return 0;
268 }
269 #endif
270 
271 /* callers should hold rtnl semaphore */
272 
273 static void fib_rules_detach(struct net_device *dev)
274 {
275 	struct hlist_node *node;
276 	struct fib_rule *r;
277 
278 	hlist_for_each_entry(r, node, &fib_rules, hlist) {
279 		if (r->r_ifindex == dev->ifindex)
280 			r->r_ifindex = -1;
281 
282 	}
283 }
284 
285 /* callers should hold rtnl semaphore */
286 
287 static void fib_rules_attach(struct net_device *dev)
288 {
289 	struct hlist_node *node;
290 	struct fib_rule *r;
291 
292 	hlist_for_each_entry(r, node, &fib_rules, hlist) {
293 		if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
294 			r->r_ifindex = dev->ifindex;
295 	}
296 }
297 
/* Policy-routing lookup: walk the rule list under RCU in priority
 * order.  The first rule whose selectors match either names the table
 * to consult (RTN_UNICAST) or terminates the lookup with an error
 * (unreachable/blackhole/prohibit).
 *
 * On success the matching rule is stored in res->r with an extra
 * reference taken; the caller is responsible for dropping it. */
int fib_lookup(const struct flowi *flp, struct fib_result *res)
{
	int err;
	struct fib_rule *r, *policy;
	struct fib_table *tb;
	struct hlist_node *node;

	u32 daddr = flp->fl4_dst;
	u32 saddr = flp->fl4_src;

FRprintk("Lookup: %u.%u.%u.%u <- %u.%u.%u.%u ",
	NIPQUAD(flp->fl4_dst), NIPQUAD(flp->fl4_src));

	rcu_read_lock();

	hlist_for_each_entry_rcu(r, node, &fib_rules, hlist) {
		/* Zero-valued tos/fwmark/ifindex selectors are wildcards;
		 * src/dst match via XOR under the prefix mask. */
		if (((saddr^r->r_src) & r->r_srcmask) ||
		    ((daddr^r->r_dst) & r->r_dstmask) ||
		    (r->r_tos && r->r_tos != flp->fl4_tos) ||
#ifdef CONFIG_IP_ROUTE_FWMARK
		    (r->r_fwmark && r->r_fwmark != flp->fl4_fwmark) ||
#endif
		    (r->r_ifindex && r->r_ifindex != flp->iif))
			continue;

FRprintk("tb %d r %d ", r->r_table, r->r_action);
		switch (r->r_action) {
		case RTN_UNICAST:
			policy = r;
			break;
		case RTN_UNREACHABLE:
			rcu_read_unlock();
			return -ENETUNREACH;
		default:	/* unknown actions deliberately fall through
				 * and behave like blackhole */
		case RTN_BLACKHOLE:
			rcu_read_unlock();
			return -EINVAL;
		case RTN_PROHIBIT:
			rcu_read_unlock();
			return -EACCES;
		}

		if ((tb = fib_get_table(r->r_table)) == NULL)
			continue;
		err = tb->tb_lookup(tb, flp, res);
		if (err == 0) {
			res->r = policy;
			if (policy)
				atomic_inc(&policy->r_clntref);
			rcu_read_unlock();
			return 0;
		}
		/* -EAGAIN means "no match in this table, try the next
		 * rule"; any other error aborts the lookup. */
		if (err < 0 && err != -EAGAIN) {
			rcu_read_unlock();
			return err;
		}
	}
FRprintk("FAILURE\n");
	rcu_read_unlock();
	return -ENETUNREACH;
}
359 
360 void fib_select_default(const struct flowi *flp, struct fib_result *res)
361 {
362 	if (res->r && res->r->r_action == RTN_UNICAST &&
363 	    FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) {
364 		struct fib_table *tb;
365 		if ((tb = fib_get_table(res->r->r_table)) != NULL)
366 			tb->tb_select_default(tb, flp, res);
367 	}
368 }
369 
370 static int fib_rules_event(struct notifier_block *this, unsigned long event, void *ptr)
371 {
372 	struct net_device *dev = ptr;
373 
374 	if (event == NETDEV_UNREGISTER)
375 		fib_rules_detach(dev);
376 	else if (event == NETDEV_REGISTER)
377 		fib_rules_attach(dev);
378 	return NOTIFY_DONE;
379 }
380 
381 
382 static struct notifier_block fib_rules_notifier = {
383 	.notifier_call =fib_rules_event,
384 };
385 
/* Fill one RTM_NEWRULE/RTM_DELRULE netlink message describing @r into
 * @skb.  Returns the new skb length on success, or -1 when the skb ran
 * out of room (the partially written message is trimmed off again). */
static __inline__ int inet_fill_rule(struct sk_buff *skb,
				     struct fib_rule *r,
				     u32 pid, u32 seq, int event,
				     unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr  *nlh;
	unsigned char	 *b = skb->tail;	/* rollback point on failure */

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = r->r_dst_len;
	rtm->rtm_src_len = r->r_src_len;
	rtm->rtm_tos = r->r_tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	if (r->r_fwmark)
		RTA_PUT(skb, RTA_PROTOINFO, 4, &r->r_fwmark);
#endif
	rtm->rtm_table = r->r_table;
	rtm->rtm_protocol = 0;
	rtm->rtm_scope = 0;
	rtm->rtm_type = r->r_action;
	rtm->rtm_flags = r->r_flags;

	/* Optional attributes: emit only the ones that are set. */
	if (r->r_dst_len)
		RTA_PUT(skb, RTA_DST, 4, &r->r_dst);
	if (r->r_src_len)
		RTA_PUT(skb, RTA_SRC, 4, &r->r_src);
	if (r->r_ifname[0])
		RTA_PUT(skb, RTA_IIF, IFNAMSIZ, &r->r_ifname);
	if (r->r_preference)
		RTA_PUT(skb, RTA_PRIORITY, 4, &r->r_preference);
	if (r->r_srcmap)
		RTA_PUT(skb, RTA_GATEWAY, 4, &r->r_srcmap);
#ifdef CONFIG_NET_CLS_ROUTE
	if (r->r_tclassid)
		RTA_PUT(skb, RTA_FLOW, 4, &r->r_tclassid);
#endif
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

/* NLMSG_NEW and RTA_PUT jump to these labels when the skb is full. */
nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
433 
434 /* callers should hold rtnl semaphore */
435 
436 static void rtmsg_rule(int event, struct fib_rule *r)
437 {
438 	int size = NLMSG_SPACE(sizeof(struct rtmsg) + 128);
439 	struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
440 
441 	if (!skb)
442 		netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, ENOBUFS);
443 	else if (inet_fill_rule(skb, r, 0, 0, event, 0) < 0) {
444 		kfree_skb(skb);
445 		netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, EINVAL);
446 	} else {
447 		netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV4_RULE, GFP_KERNEL);
448 	}
449 }
450 
451 int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
452 {
453 	int idx = 0;
454 	int s_idx = cb->args[0];
455 	struct fib_rule *r;
456 	struct hlist_node *node;
457 
458 	rcu_read_lock();
459 	hlist_for_each_entry(r, node, &fib_rules, hlist) {
460 		if (idx < s_idx)
461 			goto next;
462 		if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
463 				   cb->nlh->nlmsg_seq,
464 				   RTM_NEWRULE, NLM_F_MULTI) < 0)
465 			break;
466 next:
467 		idx++;
468 	}
469 	rcu_read_unlock();
470 	cb->args[0] = idx;
471 
472 	return skb->len;
473 }
474 
/* Boot-time setup: install the three built-in rules in priority order
 * (local, then main, then default) and register the device notifier so
 * rules bound to an interface name track its ifindex.  Runs before any
 * concurrent reader or writer exists, so plain (non-RCU) list
 * operations are sufficient here. */
void __init fib_rules_init(void)
{
	INIT_HLIST_HEAD(&fib_rules);
	hlist_add_head(&local_rule.hlist, &fib_rules);
	hlist_add_after(&local_rule.hlist, &main_rule.hlist);
	hlist_add_after(&main_rule.hlist, &default_rule.hlist);
	register_netdevice_notifier(&fib_rules_notifier);
}
483