xref: /linux/net/bridge/br_forward.c (revision 7b12b9137930eb821b68e1bfa11e9de692208620)
1 /*
2  *	Forwarding decision
3  *	Linux ethernet bridge
4  *
5  *	Authors:
6  *	Lennert Buytenhek		<buytenh@gnu.org>
7  *
8  *	$Id: br_forward.c,v 1.4 2001/08/14 22:05:57 davem Exp $
9  *
10  *	This program is free software; you can redistribute it and/or
11  *	modify it under the terms of the GNU General Public License
12  *	as published by the Free Software Foundation; either version
13  *	2 of the License, or (at your option) any later version.
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/netdevice.h>
18 #include <linux/skbuff.h>
19 #include <linux/if_vlan.h>
20 #include <linux/netfilter_bridge.h>
21 #include "br_private.h"
22 
23 static inline int should_deliver(const struct net_bridge_port *p,
24 				 const struct sk_buff *skb)
25 {
26 	if (skb->dev == p->dev ||
27 	    p->state != BR_STATE_FORWARDING)
28 		return 0;
29 
30 	return 1;
31 }
32 
33 static inline unsigned packet_length(const struct sk_buff *skb)
34 {
35 	return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
36 }
37 
38 int br_dev_queue_push_xmit(struct sk_buff *skb)
39 {
40 	/* drop mtu oversized packets except tso */
41 	if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
42 		kfree_skb(skb);
43 	else {
44 #ifdef CONFIG_BRIDGE_NETFILTER
45 		/* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
46 		nf_bridge_maybe_copy_header(skb);
47 #endif
48 		skb_push(skb, ETH_HLEN);
49 
50 		dev_queue_xmit(skb);
51 	}
52 
53 	return 0;
54 }
55 
/* okfn for the LOCAL_OUT/FORWARD hooks below: run the bridge
 * POST_ROUTING netfilter hook and, if the frame is accepted, hand it
 * to br_dev_queue_push_xmit for transmission.  skb->dev has already
 * been set to the egress device by the caller.
 */
int br_forward_finish(struct sk_buff *skb)
{
	NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

	return 0;
}
63 
/* Deliver a locally originated frame out of port @to: retarget the skb
 * at the egress device and pass it through the NF_BR_LOCAL_OUT hook on
 * its way to br_forward_finish.  Consumes @skb.
 */
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	skb->dev = to->dev;
	NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
			br_forward_finish);
}
70 
/* Forward a received frame out of port @to: record the ingress device
 * for the netfilter hook, retarget the skb at the egress device, and
 * pass it through the NF_BR_FORWARD hook on its way to
 * br_forward_finish.  Consumes @skb.
 */
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	indev = skb->dev;
	skb->dev = to->dev;
	/* NOTE(review): presumably cleared because any hardware checksum
	 * state from the ingress device is not meaningful on the egress
	 * path -- confirm against checksum-offload semantics. */
	skb->ip_summed = CHECKSUM_NONE;

	NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
			br_forward_finish);
}
82 
/* called with rcu_read_lock */
/* Deliver a locally originated frame to port @to, or drop it if the
 * port is not an acceptable egress.  Consumes @skb.
 */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (!should_deliver(to, skb)) {
		kfree_skb(skb);
		return;
	}

	__br_deliver(to, skb);
}
93 
/* called with rcu_read_lock */
/* Forward a received frame to port @to, or drop it if the port is not
 * an acceptable egress.  Consumes @skb.
 */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (!should_deliver(to, skb)) {
		kfree_skb(skb);
		return;
	}

	__br_forward(to, skb);
}
104 
/* NOTE(review): header said "called under bridge lock", but the port
 * list is walked with list_for_each_entry_rcu() and br_flood_deliver's
 * own comment says "called with rcu_read_lock" -- confirm the actual
 * locking contract.
 *
 * Send @skb out of every port that should_deliver() accepts, using
 * @__packet_hook (__br_deliver or __br_forward) for the actual
 * transmit.  If @clone is set, the caller keeps ownership of @skb and
 * a private clone is flooded instead; otherwise @skb is consumed.
 */
static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	if (clone) {
		struct sk_buff *skb2;

		/* Caller keeps the original; work on a copy. */
		if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			br->statistics.tx_dropped++;
			return;
		}

		skb = skb2;
	}

	/* "prev" trails the scan by one eligible port so the final port
	 * can be handed the skb itself, saving one clone. */
	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		if (should_deliver(p, skb)) {
			if (prev != NULL) {
				struct sk_buff *skb2;

				/* Clone for the previous port; on failure the
				 * remaining ports (including prev) get nothing. */
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
					br->statistics.tx_dropped++;
					kfree_skb(skb);
					return;
				}

				__packet_hook(prev, skb2);
			}

			prev = p;
		}
	}

	/* Last eligible port receives the original skb. */
	if (prev != NULL) {
		__packet_hook(prev, skb);
		return;
	}

	/* No eligible port: drop the frame. */
	kfree_skb(skb);
}
151 
152 
/* called with rcu_read_lock */
/* Flood a locally originated frame to all eligible ports.  See
 * br_flood() for @clone / skb-ownership semantics.
 */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, int clone)
{
	br_flood(br, skb, clone, __br_deliver);
}
158 
/* called under bridge lock */
/* NOTE(review): the locking comment above is inconsistent with
 * br_flood_deliver ("called with rcu_read_lock") even though both go
 * through the same RCU list walk in br_flood() -- confirm.
 *
 * Flood a received frame to all eligible ports.  See br_flood() for
 * @clone / skb-ownership semantics.
 */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, int clone)
{
	br_flood(br, skb, clone, __br_forward);
}
164