1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Forwarding decision 4 * Linux ethernet bridge 5 * 6 * Authors: 7 * Lennert Buytenhek <buytenh@gnu.org> 8 */ 9 10 #include <linux/err.h> 11 #include <linux/slab.h> 12 #include <linux/kernel.h> 13 #include <linux/netdevice.h> 14 #include <linux/netpoll.h> 15 #include <linux/skbuff.h> 16 #include <linux/if_vlan.h> 17 #include <linux/netfilter_bridge.h> 18 #include "br_private.h" 19 20 /* Don't forward packets to originating port or forwarding disabled */ 21 static inline int should_deliver(const struct net_bridge_port *p, 22 const struct sk_buff *skb) 23 { 24 struct net_bridge_vlan_group *vg; 25 26 vg = nbp_vlan_group_rcu(p); 27 return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && 28 (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) && 29 br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) && 30 !br_skb_isolated(p, skb); 31 } 32 33 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 34 { 35 skb_push(skb, ETH_HLEN); 36 if (!is_skb_forwardable(skb->dev, skb)) 37 goto drop; 38 39 br_drop_fake_rtable(skb); 40 41 if (skb->ip_summed == CHECKSUM_PARTIAL && 42 eth_type_vlan(skb->protocol)) { 43 int depth; 44 45 if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth)) 46 goto drop; 47 48 skb_set_network_header(skb, depth); 49 } 50 51 br_switchdev_frame_set_offload_fwd_mark(skb); 52 53 dev_queue_xmit(skb); 54 55 return 0; 56 57 drop: 58 kfree_skb(skb); 59 return 0; 60 } 61 EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); 62 63 int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 64 { 65 skb_clear_tstamp(skb); 66 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, 67 net, sk, skb, NULL, skb->dev, 68 br_dev_queue_push_xmit); 69 70 } 71 EXPORT_SYMBOL_GPL(br_forward_finish); 72 73 static void __br_forward(const struct net_bridge_port *to, 74 struct sk_buff *skb, bool local_orig) 75 { 76 struct net_bridge_vlan_group *vg; 77 struct net_device *indev; 78 struct net *net; 79 int br_hook; 80 81 /* Mark the skb for forwarding offload early so that br_handle_vlan() 82 * can know whether to pop the VLAN header on egress or keep it. 83 */ 84 nbp_switchdev_frame_mark_tx_fwd_offload(to, skb); 85 86 vg = nbp_vlan_group_rcu(to); 87 skb = br_handle_vlan(to->br, to, vg, skb); 88 if (!skb) 89 return; 90 91 indev = skb->dev; 92 skb->dev = to->dev; 93 if (!local_orig) { 94 if (skb_warn_if_lro(skb)) { 95 kfree_skb(skb); 96 return; 97 } 98 br_hook = NF_BR_FORWARD; 99 skb_forward_csum(skb); 100 net = dev_net(indev); 101 } else { 102 if (unlikely(netpoll_tx_running(to->br->dev))) { 103 skb_push(skb, ETH_HLEN); 104 if (!is_skb_forwardable(skb->dev, skb)) 105 kfree_skb(skb); 106 else 107 br_netpoll_send_skb(to, skb); 108 return; 109 } 110 br_hook = NF_BR_LOCAL_OUT; 111 net = dev_net(skb->dev); 112 indev = NULL; 113 } 114 115 NF_HOOK(NFPROTO_BRIDGE, br_hook, 116 net, NULL, skb, indev, skb->dev, 117 br_forward_finish); 118 } 119 120 static int deliver_clone(const struct net_bridge_port *prev, 121 struct sk_buff *skb, bool local_orig) 122 { 123 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; 124 125 skb = skb_clone(skb, GFP_ATOMIC); 126 if (!skb) { 127 DEV_STATS_INC(dev, tx_dropped); 128 return -ENOMEM; 129 } 130 131 __br_forward(prev, skb, local_orig); 132 return 0; 133 } 134 135 /** 136 * br_forward - forward a packet to a specific port 137 * @to: destination port 138 * @skb: packet being forwarded 139 * @local_rcv: packet will be received locally after forwarding 140 * @local_orig: packet is locally originated 141 * 142 * Should be called with rcu_read_lock. 143 */ 144 void br_forward(const struct net_bridge_port *to, 145 struct sk_buff *skb, bool local_rcv, bool local_orig) 146 { 147 if (unlikely(!to)) 148 goto out; 149 150 /* redirect to backup link if the destination port is down */ 151 if (rcu_access_pointer(to->backup_port) && 152 (!netif_carrier_ok(to->dev) || !netif_running(to->dev))) { 153 struct net_bridge_port *backup_port; 154 155 backup_port = rcu_dereference(to->backup_port); 156 if (unlikely(!backup_port)) 157 goto out; 158 BR_INPUT_SKB_CB(skb)->backup_nhid = READ_ONCE(to->backup_nhid); 159 to = backup_port; 160 } 161 162 if (should_deliver(to, skb)) { 163 if (local_rcv) 164 deliver_clone(to, skb, local_orig); 165 else 166 __br_forward(to, skb, local_orig); 167 return; 168 } 169 170 out: 171 if (!local_rcv) 172 kfree_skb(skb); 173 } 174 EXPORT_SYMBOL_GPL(br_forward); 175 176 static struct net_bridge_port *maybe_deliver( 177 struct net_bridge_port *prev, struct net_bridge_port *p, 178 struct sk_buff *skb, bool local_orig) 179 { 180 u8 igmp_type = br_multicast_igmp_type(skb); 181 int err; 182 183 if (!should_deliver(p, skb)) 184 return prev; 185 186 nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb); 187 188 if (!prev) 189 goto out; 190 191 err = deliver_clone(prev, skb, local_orig); 192 if (err) 193 return ERR_PTR(err); 194 out: 195 br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX); 196 197 return p; 198 } 199 200 /* called under rcu_read_lock */ 201 void br_flood(struct net_bridge *br, struct sk_buff *skb, 202 enum br_pkt_type pkt_type, bool local_rcv, bool local_orig, 203 u16 vid) 204 { 205 enum skb_drop_reason reason = SKB_DROP_REASON_NO_TX_TARGET; 206 struct net_bridge_port *prev = NULL; 207 struct net_bridge_port *p; 208 209 br_tc_skb_miss_set(skb, pkt_type != BR_PKT_BROADCAST); 210 211 list_for_each_entry_rcu(p, &br->port_list, list) { 212 /* Do not flood unicast traffic to ports that turn it off, nor 213 * other traffic if flood off, except for traffic we originate 214 */ 215 switch (pkt_type) { 216 case BR_PKT_UNICAST: 217 if (!(p->flags & BR_FLOOD)) 218 continue; 219 break; 220 case BR_PKT_MULTICAST: 221 if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) 222 continue; 223 break; 224 case BR_PKT_BROADCAST: 225 if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev) 226 continue; 227 break; 228 } 229 230 /* Do not flood to ports that enable proxy ARP */ 231 if (p->flags & BR_PROXYARP) 232 continue; 233 if (BR_INPUT_SKB_CB(skb)->proxyarp_replied && 234 ((p->flags & BR_PROXYARP_WIFI) || 235 br_is_neigh_suppress_enabled(p, vid))) 236 continue; 237 238 prev = maybe_deliver(prev, p, skb, local_orig); 239 if (IS_ERR(prev)) { 240 reason = PTR_ERR(prev) == -ENOMEM ? SKB_DROP_REASON_NOMEM : 241 SKB_DROP_REASON_NOT_SPECIFIED; 242 goto out; 243 } 244 } 245 246 if (!prev) 247 goto out; 248 249 if (local_rcv) 250 deliver_clone(prev, skb, local_orig); 251 else 252 __br_forward(prev, skb, local_orig); 253 return; 254 255 out: 256 if (!local_rcv) 257 kfree_skb_reason(skb, reason); 258 } 259 260 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING 261 static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb, 262 const unsigned char *addr, bool local_orig) 263 { 264 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; 265 const unsigned char *src = eth_hdr(skb)->h_source; 266 struct sk_buff *nskb; 267 268 if (!should_deliver(p, skb)) 269 return; 270 271 /* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */ 272 if (skb->dev == p->dev && ether_addr_equal(src, addr)) 273 return; 274 275 __skb_push(skb, ETH_HLEN); 276 nskb = pskb_copy(skb, GFP_ATOMIC); 277 __skb_pull(skb, ETH_HLEN); 278 if (!nskb) { 279 DEV_STATS_INC(dev, tx_dropped); 280 return; 281 } 282 283 skb = nskb; 284 __skb_pull(skb, ETH_HLEN); 285 if (!is_broadcast_ether_addr(addr)) 286 memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN); 287 288 __br_forward(p, skb, local_orig); 289 } 290 291 /* called with rcu_read_lock */ 292 void br_multicast_flood(struct net_bridge_mdb_entry *mdst, 293 struct sk_buff *skb, 294 struct net_bridge_mcast *brmctx, 295 bool local_rcv, bool local_orig) 296 { 297 enum skb_drop_reason reason = SKB_DROP_REASON_NO_TX_TARGET; 298 struct net_bridge_port *prev = NULL; 299 struct net_bridge_port_group *p; 300 bool allow_mode_include = true; 301 struct hlist_node *rp; 302 303 rp = br_multicast_get_first_rport_node(brmctx, skb); 304 305 if (mdst) { 306 p = rcu_dereference(mdst->ports); 307 if (br_multicast_should_handle_mode(brmctx, mdst->addr.proto) && 308 br_multicast_is_star_g(&mdst->addr)) 309 allow_mode_include = false; 310 } else { 311 p = NULL; 312 br_tc_skb_miss_set(skb, true); 313 } 314 315 while (p || rp) { 316 struct net_bridge_port *port, *lport, *rport; 317 318 lport = p ? p->key.port : NULL; 319 rport = br_multicast_rport_from_node_skb(rp, skb); 320 321 if ((unsigned long)lport > (unsigned long)rport) { 322 port = lport; 323 324 if (port->flags & BR_MULTICAST_TO_UNICAST) { 325 maybe_deliver_addr(lport, skb, p->eth_addr, 326 local_orig); 327 goto delivered; 328 } 329 if ((!allow_mode_include && 330 p->filter_mode == MCAST_INCLUDE) || 331 (p->flags & MDB_PG_FLAGS_BLOCKED)) 332 goto delivered; 333 } else { 334 port = rport; 335 } 336 337 prev = maybe_deliver(prev, port, skb, local_orig); 338 if (IS_ERR(prev)) { 339 reason = PTR_ERR(prev) == -ENOMEM ? SKB_DROP_REASON_NOMEM : 340 SKB_DROP_REASON_NOT_SPECIFIED; 341 goto out; 342 } 343 delivered: 344 if ((unsigned long)lport >= (unsigned long)port) 345 p = rcu_dereference(p->next); 346 if ((unsigned long)rport >= (unsigned long)port) 347 rp = rcu_dereference(hlist_next_rcu(rp)); 348 } 349 350 if (!prev) 351 goto out; 352 353 if (local_rcv) 354 deliver_clone(prev, skb, local_orig); 355 else 356 __br_forward(prev, skb, local_orig); 357 return; 358 359 out: 360 if (!local_rcv) 361 kfree_skb_reason(skb, reason); 362 } 363 #endif 364