xref: /linux/net/ipv4/fib_rules.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		IPv4 Forwarding Information Base: policy rules.
7  *
8  * Version:	$Id: fib_rules.c,v 1.17 2001/10/31 21:55:54 davem Exp $
9  *
10  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11  *
12  *		This program is free software; you can redistribute it and/or
13  *		modify it under the terms of the GNU General Public License
14  *		as published by the Free Software Foundation; either version
15  *		2 of the License, or (at your option) any later version.
16  *
17  * Fixes:
18  * 		Rani Assaf	:	local_rule cannot be deleted
19  *		Marc Boucher	:	routing by fwmark
20  */
21 
22 #include <linux/config.h>
23 #include <asm/uaccess.h>
24 #include <asm/system.h>
25 #include <linux/bitops.h>
26 #include <linux/types.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/mm.h>
30 #include <linux/string.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/errno.h>
34 #include <linux/in.h>
35 #include <linux/inet.h>
36 #include <linux/inetdevice.h>
37 #include <linux/netdevice.h>
38 #include <linux/if_arp.h>
39 #include <linux/proc_fs.h>
40 #include <linux/skbuff.h>
41 #include <linux/netlink.h>
42 #include <linux/init.h>
43 #include <linux/list.h>
44 #include <linux/rcupdate.h>
45 
46 #include <net/ip.h>
47 #include <net/protocol.h>
48 #include <net/route.h>
49 #include <net/tcp.h>
50 #include <net/sock.h>
51 #include <net/ip_fib.h>
52 
/* Debug trace hook for rule lookups; expands to nothing by default. */
#define FRprintk(a...)
54 
/*
 * One IPv4 policy routing rule.
 *
 * Rules sit on the global fib_rules hlist in ascending r_preference
 * order and are matched first-to-last by fib_lookup().  Lifetime is
 * refcount + RCU: r_clntref counts users; a rule unlinked from the
 * list is flagged r_dead, and fib_rule_put() defers the final kfree
 * through call_rcu() so concurrent RCU readers stay safe.
 */
struct fib_rule
{
	struct hlist_node hlist;	/* linkage on the fib_rules list */
	atomic_t	r_clntref;	/* reference count (list + lookups) */
	u32		r_preference;	/* priority; lower value matches first */
	unsigned char	r_table;	/* routing table id this rule selects */
	unsigned char	r_action;	/* RTN_UNICAST, RTN_UNREACHABLE, ... */
	unsigned char	r_dst_len;	/* destination prefix length (bits) */
	unsigned char	r_src_len;	/* source prefix length (bits) */
	u32		r_src;		/* source address to match */
	u32		r_srcmask;	/* netmask derived from r_src_len */
	u32		r_dst;		/* destination address to match */
	u32		r_dstmask;	/* netmask derived from r_dst_len */
	u32		r_srcmap;	/* RTA_GATEWAY payload from userspace */
	u8		r_flags;	/* rtm_flags copied from the request */
	u8		r_tos;		/* TOS to match; 0 matches any */
#ifdef CONFIG_IP_ROUTE_FWMARK
	u32		r_fwmark;	/* firewall mark to match; 0 = any */
#endif
	int		r_ifindex;	/* input ifindex; -1 = named dev gone */
#ifdef CONFIG_NET_CLS_ROUTE
	__u32		r_tclassid;	/* traffic class id (RTA_FLOW) */
#endif
	char		r_ifname[IFNAMSIZ];	/* input interface name */
	int		r_dead;		/* set once unlinked; allows RCU free */
	struct		rcu_head rcu;	/* for deferred kfree */
};
82 
/*
 * The three built-in rules, pre-linked at init in priority order
 * local (0) -> main (0x7FFE) -> default (0x7FFF).  Each starts with
 * a refcount of 2: one for the list, one so they can never actually
 * be freed.  local_rule additionally may not be deleted at all
 * (see inet_rtm_delrule).
 */
static struct fib_rule default_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7FFF,
	.r_table =	RT_TABLE_DEFAULT,
	.r_action =	RTN_UNICAST,
};

static struct fib_rule main_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7FFE,
	.r_table =	RT_TABLE_MAIN,
	.r_action =	RTN_UNICAST,
};

static struct fib_rule local_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_table =	RT_TABLE_LOCAL,
	.r_action =	RTN_UNICAST,
};

/* Global rule list; written under RTNL, read under RCU by fib_lookup(). */
static struct hlist_head fib_rules;

/* writer func called from netlink -- rtnl_sem hold*/

static void rtmsg_rule(int, struct fib_rule *);
108 
/*
 * Handle RTM_DELRULE: remove the first rule that matches the request.
 *
 * Called from rtnetlink with the RTNL semaphore held (sole writer);
 * readers traverse the list under RCU, hence hlist_del_rcu() plus the
 * r_dead flag so the final free is deferred to a grace period.
 *
 * An absent attribute in the request acts as a wildcard for that
 * field.  Returns 0 on success, -EPERM for an attempt to delete the
 * built-in local rule, -ESRCH when nothing matches.
 */
int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct fib_rule *r;
	struct hlist_node *node;
	int err = -ESRCH;

	hlist_for_each_entry(r, node, &fib_rules, hlist) {
		if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 4) == 0) &&
		    rtm->rtm_src_len == r->r_src_len &&
		    rtm->rtm_dst_len == r->r_dst_len &&
		    (!rta[RTA_DST-1] || memcmp(RTA_DATA(rta[RTA_DST-1]), &r->r_dst, 4) == 0) &&
		    rtm->rtm_tos == r->r_tos &&
#ifdef CONFIG_IP_ROUTE_FWMARK
		    (!rta[RTA_PROTOINFO-1] || memcmp(RTA_DATA(rta[RTA_PROTOINFO-1]), &r->r_fwmark, 4) == 0) &&
#endif
		    (!rtm->rtm_type || rtm->rtm_type == r->r_action) &&
		    (!rta[RTA_PRIORITY-1] || memcmp(RTA_DATA(rta[RTA_PRIORITY-1]), &r->r_preference, 4) == 0) &&
		    (!rta[RTA_IIF-1] || rtattr_strcmp(rta[RTA_IIF-1], r->r_ifname) == 0) &&
		    (!rtm->rtm_table || (r && rtm->rtm_table == r->r_table))) {
			err = -EPERM;
			if (r == &local_rule)
				break;

			/* Unlink for new readers; existing RCU readers may
			 * still see the rule until a grace period passes,
			 * so mark it dead and drop the list's reference --
			 * fib_rule_put() will free it via call_rcu(). */
			hlist_del_rcu(&r->hlist);
			r->r_dead = 1;
			rtmsg_rule(RTM_DELRULE, r);
			fib_rule_put(r);
			err = 0;
			break;
		}
	}
	return err;
}
144 
145 /* Allocate new unique table id */
146 
147 static struct fib_table *fib_empty_table(void)
148 {
149 	int id;
150 
151 	for (id = 1; id <= RT_TABLE_MAX; id++)
152 		if (fib_tables[id] == NULL)
153 			return __fib_new_table(id);
154 	return NULL;
155 }
156 
157 static inline void fib_rule_put_rcu(struct rcu_head *head)
158 {
159 	struct fib_rule *r = container_of(head, struct fib_rule, rcu);
160 	kfree(r);
161 }
162 
163 void fib_rule_put(struct fib_rule *r)
164 {
165 	if (atomic_dec_and_test(&r->r_clntref)) {
166 		if (r->r_dead)
167 			call_rcu(&r->rcu, fib_rule_put_rcu);
168 		else
169 			printk("Freeing alive rule %p\n", r);
170 	}
171 }
172 
173 /* writer func called from netlink -- rtnl_sem hold*/
174 
/*
 * Handle RTM_NEWRULE: build a rule from the netlink request and link
 * it into the list at its priority position.
 *
 * Called from rtnetlink with the RTNL semaphore held (sole writer);
 * insertion uses the _rcu list primitives so concurrent fib_lookup()
 * readers always see a consistent list.
 *
 * Returns 0 on success, -EINVAL on a malformed request, -ENOBUFS if
 * no free table id is available, -ENOMEM on allocation failure.
 */
int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct fib_rule *r, *new_r, *last = NULL;
	struct hlist_node *node = NULL;
	unsigned char table_id;

	/* Prefix lengths are IPv4 bit counts; TOS must not carry
	 * non-TOS bits. */
	if (rtm->rtm_src_len > 32 || rtm->rtm_dst_len > 32 ||
	    (rtm->rtm_tos & ~IPTOS_TOS_MASK))
		return -EINVAL;

	if (rta[RTA_IIF-1] && RTA_PAYLOAD(rta[RTA_IIF-1]) > IFNAMSIZ)
		return -EINVAL;

	table_id = rtm->rtm_table;
	if (table_id == RT_TABLE_UNSPEC) {
		struct fib_table *table;
		/* No table given: for unicast rules allocate a fresh,
		 * unused table id. */
		if (rtm->rtm_type == RTN_UNICAST) {
			if ((table = fib_empty_table()) == NULL)
				return -ENOBUFS;
			table_id = table->tb_id;
		}
	}

	new_r = kmalloc(sizeof(*new_r), GFP_KERNEL);
	if (!new_r)
		return -ENOMEM;
	memset(new_r, 0, sizeof(*new_r));

	/* Copy the optional match attributes; absent ones stay zero
	 * (wildcard). */
	if (rta[RTA_SRC-1])
		memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
	if (rta[RTA_DST-1])
		memcpy(&new_r->r_dst, RTA_DATA(rta[RTA_DST-1]), 4);
	if (rta[RTA_GATEWAY-1])
		memcpy(&new_r->r_srcmap, RTA_DATA(rta[RTA_GATEWAY-1]), 4);
	new_r->r_src_len = rtm->rtm_src_len;
	new_r->r_dst_len = rtm->rtm_dst_len;
	new_r->r_srcmask = inet_make_mask(rtm->rtm_src_len);
	new_r->r_dstmask = inet_make_mask(rtm->rtm_dst_len);
	new_r->r_tos = rtm->rtm_tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	if (rta[RTA_PROTOINFO-1])
		memcpy(&new_r->r_fwmark, RTA_DATA(rta[RTA_PROTOINFO-1]), 4);
#endif
	new_r->r_action = rtm->rtm_type;
	new_r->r_flags = rtm->rtm_flags;
	if (rta[RTA_PRIORITY-1])
		memcpy(&new_r->r_preference, RTA_DATA(rta[RTA_PRIORITY-1]), 4);
	new_r->r_table = table_id;
	if (rta[RTA_IIF-1]) {
		struct net_device *dev;
		rtattr_strlcpy(new_r->r_ifname, rta[RTA_IIF-1], IFNAMSIZ);
		/* -1 marks "named device not present"; fib_rules_attach()
		 * resolves it when the device appears. */
		new_r->r_ifindex = -1;
		dev = __dev_get_by_name(new_r->r_ifname);
		if (dev)
			new_r->r_ifindex = dev->ifindex;
	}
#ifdef CONFIG_NET_CLS_ROUTE
	if (rta[RTA_FLOW-1])
		memcpy(&new_r->r_tclassid, RTA_DATA(rta[RTA_FLOW-1]), 4);
#endif
	/* List is never empty: the built-in local rule is always first. */
	r = container_of(fib_rules.first, struct fib_rule, hlist);

	/* No priority given: default to one less than the priority of
	 * the second rule on the list, i.e. just after the local rule. */
	if (!new_r->r_preference) {
		if (r && r->hlist.next != NULL) {
			r = container_of(r->hlist.next, struct fib_rule, hlist);
			if (r->r_preference)
				new_r->r_preference = r->r_preference - 1;
		}
	}

	/* Find the insertion point that keeps the list sorted by
	 * ascending preference: 'last' is the final rule with a
	 * preference <= ours, 'r' the first with a greater one. */
	hlist_for_each_entry(r, node, &fib_rules, hlist) {
		if (r->r_preference > new_r->r_preference)
			break;
		last = r;
	}
	/* The list holds one reference on the rule. */
	atomic_inc(&new_r->r_clntref);

	if (last)
		hlist_add_after_rcu(&last->hlist, &new_r->hlist);
	else
		hlist_add_before_rcu(&new_r->hlist, &r->hlist);

	rtmsg_rule(RTM_NEWRULE, new_r);
	return 0;
}
262 
#ifdef CONFIG_NET_CLS_ROUTE
/* Traffic classid of the rule behind a lookup result, or 0 if none. */
u32 fib_rules_tclass(struct fib_result *res)
{
	return res->r ? res->r->r_tclassid : 0;
}
#endif
271 
272 /* callers should hold rtnl semaphore */
273 
274 static void fib_rules_detach(struct net_device *dev)
275 {
276 	struct hlist_node *node;
277 	struct fib_rule *r;
278 
279 	hlist_for_each_entry(r, node, &fib_rules, hlist) {
280 		if (r->r_ifindex == dev->ifindex)
281 			r->r_ifindex = -1;
282 
283 	}
284 }
285 
286 /* callers should hold rtnl semaphore */
287 
288 static void fib_rules_attach(struct net_device *dev)
289 {
290 	struct hlist_node *node;
291 	struct fib_rule *r;
292 
293 	hlist_for_each_entry(r, node, &fib_rules, hlist) {
294 		if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
295 			r->r_ifindex = dev->ifindex;
296 	}
297 }
298 
/*
 * Policy-routing lookup: walk the rule list in priority order under
 * RCU and return the result of the first successful table lookup.
 *
 * Returns 0 on success, with res->r pointing at the matched
 * RTN_UNICAST rule carrying an extra reference (callers are expected
 * to drop it via fib_rule_put()).  Terminal rule actions translate
 * to errors: RTN_UNREACHABLE -> -ENETUNREACH, RTN_PROHIBIT ->
 * -EACCES, RTN_BLACKHOLE and any unknown action -> -EINVAL.  A table
 * lookup returning -EAGAIN means "continue with the next rule";
 * running off the end of the list yields -ENETUNREACH.
 */
int fib_lookup(const struct flowi *flp, struct fib_result *res)
{
	int err;
	struct fib_rule *r, *policy;
	struct fib_table *tb;
	struct hlist_node *node;

	u32 daddr = flp->fl4_dst;
	u32 saddr = flp->fl4_src;

FRprintk("Lookup: %u.%u.%u.%u <- %u.%u.%u.%u ",
	NIPQUAD(flp->fl4_dst), NIPQUAD(flp->fl4_src));

	rcu_read_lock();

	hlist_for_each_entry_rcu(r, node, &fib_rules, hlist) {
		/* Skip rules whose selectors don't match this flow;
		 * zero TOS/fwmark/ifindex act as wildcards. */
		if (((saddr^r->r_src) & r->r_srcmask) ||
		    ((daddr^r->r_dst) & r->r_dstmask) ||
		    (r->r_tos && r->r_tos != flp->fl4_tos) ||
#ifdef CONFIG_IP_ROUTE_FWMARK
		    (r->r_fwmark && r->r_fwmark != flp->fl4_fwmark) ||
#endif
		    (r->r_ifindex && r->r_ifindex != flp->iif))
			continue;

FRprintk("tb %d r %d ", r->r_table, r->r_action);
		/* Only RTN_UNICAST proceeds to the table lookup below
		 * (setting 'policy'); all other actions terminate. */
		switch (r->r_action) {
		case RTN_UNICAST:
			policy = r;
			break;
		case RTN_UNREACHABLE:
			rcu_read_unlock();
			return -ENETUNREACH;
		default:
		case RTN_BLACKHOLE:
			rcu_read_unlock();
			return -EINVAL;
		case RTN_PROHIBIT:
			rcu_read_unlock();
			return -EACCES;
		}

		if ((tb = fib_get_table(r->r_table)) == NULL)
			continue;
		err = tb->tb_lookup(tb, flp, res);
		if (err == 0) {
			/* Hand the matched rule to the caller with its
			 * own reference. */
			res->r = policy;
			if (policy)
				atomic_inc(&policy->r_clntref);
			rcu_read_unlock();
			return 0;
		}
		if (err < 0 && err != -EAGAIN) {
			rcu_read_unlock();
			return err;
		}
	}
FRprintk("FAILURE\n");
	rcu_read_unlock();
	return -ENETUNREACH;
}
360 
361 void fib_select_default(const struct flowi *flp, struct fib_result *res)
362 {
363 	if (res->r && res->r->r_action == RTN_UNICAST &&
364 	    FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) {
365 		struct fib_table *tb;
366 		if ((tb = fib_get_table(res->r->r_table)) != NULL)
367 			tb->tb_select_default(tb, flp, res);
368 	}
369 }
370 
371 static int fib_rules_event(struct notifier_block *this, unsigned long event, void *ptr)
372 {
373 	struct net_device *dev = ptr;
374 
375 	if (event == NETDEV_UNREGISTER)
376 		fib_rules_detach(dev);
377 	else if (event == NETDEV_REGISTER)
378 		fib_rules_attach(dev);
379 	return NOTIFY_DONE;
380 }
381 
382 
/* Registered at init to track device register/unregister events. */
static struct notifier_block fib_rules_notifier = {
	.notifier_call =fib_rules_event,
};
386 
/*
 * Serialize one rule into an rtnetlink message in 'skb'.
 *
 * Zero-valued optional fields are simply omitted from the message.
 * Returns skb->len on success.  On buffer exhaustion the NLMSG_NEW /
 * RTA_PUT macros jump to the failure labels below, where the partial
 * message is trimmed off and -1 returned so the caller can abort or
 * continue in a later dump pass.
 */
static __inline__ int inet_fill_rule(struct sk_buff *skb,
				     struct fib_rule *r,
				     u32 pid, u32 seq, int event,
				     unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr  *nlh;
	unsigned char	 *b = skb->tail;	/* start of this message, for trimming */

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = r->r_dst_len;
	rtm->rtm_src_len = r->r_src_len;
	rtm->rtm_tos = r->r_tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	if (r->r_fwmark)
		RTA_PUT(skb, RTA_PROTOINFO, 4, &r->r_fwmark);
#endif
	rtm->rtm_table = r->r_table;
	rtm->rtm_protocol = 0;
	rtm->rtm_scope = 0;
	rtm->rtm_type = r->r_action;
	rtm->rtm_flags = r->r_flags;

	if (r->r_dst_len)
		RTA_PUT(skb, RTA_DST, 4, &r->r_dst);
	if (r->r_src_len)
		RTA_PUT(skb, RTA_SRC, 4, &r->r_src);
	if (r->r_ifname[0])
		RTA_PUT(skb, RTA_IIF, IFNAMSIZ, &r->r_ifname);
	if (r->r_preference)
		RTA_PUT(skb, RTA_PRIORITY, 4, &r->r_preference);
	if (r->r_srcmap)
		RTA_PUT(skb, RTA_GATEWAY, 4, &r->r_srcmap);
#ifdef CONFIG_NET_CLS_ROUTE
	if (r->r_tclassid)
		RTA_PUT(skb, RTA_FLOW, 4, &r->r_tclassid);
#endif
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	/* Out of skb space: discard the partial message. */
	skb_trim(skb, b - skb->data);
	return -1;
}
434 
435 /* callers should hold rtnl semaphore */
436 
437 static void rtmsg_rule(int event, struct fib_rule *r)
438 {
439 	int size = NLMSG_SPACE(sizeof(struct rtmsg) + 128);
440 	struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
441 
442 	if (!skb)
443 		netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, ENOBUFS);
444 	else if (inet_fill_rule(skb, r, 0, 0, event, 0) < 0) {
445 		kfree_skb(skb);
446 		netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, EINVAL);
447 	} else {
448 		netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV4_RULE, GFP_KERNEL);
449 	}
450 }
451 
452 int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
453 {
454 	int idx = 0;
455 	int s_idx = cb->args[0];
456 	struct fib_rule *r;
457 	struct hlist_node *node;
458 
459 	rcu_read_lock();
460 	hlist_for_each_entry(r, node, &fib_rules, hlist) {
461 
462 		if (idx < s_idx)
463 			continue;
464 		if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
465 				   cb->nlh->nlmsg_seq,
466 				   RTM_NEWRULE, NLM_F_MULTI) < 0)
467 			break;
468 		idx++;
469 	}
470 	rcu_read_unlock();
471 	cb->args[0] = idx;
472 
473 	return skb->len;
474 }
475 
476 void __init fib_rules_init(void)
477 {
478 	INIT_HLIST_HEAD(&fib_rules);
479 	hlist_add_head(&local_rule.hlist, &fib_rules);
480 	hlist_add_after(&local_rule.hlist, &main_rule.hlist);
481 	hlist_add_after(&main_rule.hlist, &default_rule.hlist);
482 	register_netdevice_notifier(&fib_rules_notifier);
483 }
484