xref: /linux/net/hsr/hsr_slave.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright 2011-2014 Autronica Fire and Security AS
3  *
4  * Author(s):
5  *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
6  *
 * Frame handler and other utility functions for HSR and PRP.
8  */
9 
10 #include "hsr_slave.h"
11 #include <linux/etherdevice.h>
12 #include <linux/if_arp.h>
13 #include <linux/if_vlan.h>
14 #include "hsr_main.h"
15 #include "hsr_device.h"
16 #include "hsr_forward.h"
17 #include "hsr_framereg.h"
18 
19 bool hsr_invalid_dan_ingress_frame(__be16 protocol)
20 {
21 	return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR));
22 }
23 
/* rx_handler attached to every slave/interlink port; called from the core
 * RX path (RCU read-side) for each frame received on the port.  Returns
 * RX_HANDLER_CONSUMED when the HSR machinery took over the frame
 * (forwarded or dropped it), RX_HANDLER_PASS to let the stack process it
 * normally.
 */
static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct hsr_port *port;
	struct hsr_priv *hsr;
	__be16 protocol;

	/* Packets from dev_loopback_xmit() do not have L2 header, bail out */
	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: skb invalid", __func__);
		return RX_HANDLER_PASS;
	}

	/* Map the receiving netdev back to its hsr_port.  A NULL result means
	 * we raced with port teardown; just hand the frame back to the stack.
	 */
	port = hsr_port_get_rcu(skb->dev);
	if (!port)
		goto finish_pass;
	hsr = port->hsr;

	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
		/* Directly kill frames sent by ourselves */
		kfree_skb(skb);
		goto finish_consume;
	}

	/* For HSR, only tagged frames are expected (unless the device offloads
	 * HSR tag removal), but for PRP there could be non tagged frames as
	 * well from Single attached nodes (SANs).
	 */
	protocol = eth_hdr(skb)->h_proto;

	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    port->type != HSR_PT_INTERLINK &&
	    hsr->proto_ops->invalid_dan_ingress_frame &&
	    hsr->proto_ops->invalid_dan_ingress_frame(protocol))
		goto finish_pass;

	/* Restore the Ethernet header (pulled by the core RX path) so the
	 * forwarding code operates on the complete L2 frame.
	 */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
	    protocol == htons(ETH_P_HSR)) {
		/* Make sure the HSR/PRP tag is in the linear area before it
		 * gets parsed; drop truncated frames.
		 */
		if (!pskb_may_pull(skb, ETH_HLEN + HSR_HLEN)) {
			kfree_skb(skb);
			goto finish_consume;
		}

		skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
	}
	skb_reset_mac_len(skb);

	/* Only the frames received over the interlink port will assign a
	 * sequence number and require synchronisation vs other sender.
	 */
	if (port->type == HSR_PT_INTERLINK) {
		spin_lock_bh(&hsr->seqnr_lock);
		hsr_forward_skb(skb, port);
		spin_unlock_bh(&hsr->seqnr_lock);
	} else {
		hsr_forward_skb(skb, port);
	}

finish_consume:
	return RX_HANDLER_CONSUMED;

finish_pass:
	return RX_HANDLER_PASS;
}
93 
94 bool hsr_port_exists(const struct net_device *dev)
95 {
96 	return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
97 }
98 
99 static int hsr_check_dev_ok(struct net_device *dev,
100 			    struct netlink_ext_ack *extack)
101 {
102 	/* Don't allow HSR on non-ethernet like devices */
103 	if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
104 	    dev->addr_len != ETH_ALEN) {
105 		NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
106 		return -EINVAL;
107 	}
108 
109 	/* Don't allow enslaving hsr devices */
110 	if (is_hsr_master(dev)) {
111 		NL_SET_ERR_MSG_MOD(extack,
112 				   "Cannot create trees of HSR devices.");
113 		return -EINVAL;
114 	}
115 
116 	if (hsr_port_exists(dev)) {
117 		NL_SET_ERR_MSG_MOD(extack,
118 				   "This device is already a HSR slave.");
119 		return -EINVAL;
120 	}
121 
122 	if (is_vlan_dev(dev)) {
123 		NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
124 		return -EINVAL;
125 	}
126 
127 	if (dev->priv_flags & IFF_DONT_BRIDGE) {
128 		NL_SET_ERR_MSG_MOD(extack,
129 				   "This device does not support bridging.");
130 		return -EOPNOTSUPP;
131 	}
132 
133 	/* HSR over bonded devices has not been tested, but I'm not sure it
134 	 * won't work...
135 	 */
136 
137 	return 0;
138 }
139 
/* Setup device to be added to the HSR bridge.  Acquires, in order:
 * promiscuity (unless forwarding is offloaded), the master upper-dev
 * link, and the rx_handler registration; on failure each step is undone
 * in reverse.  Returns 0 on success or a negative errno.
 */
static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
			     struct hsr_port *port,
			     struct netlink_ext_ack *extack)

{
	struct netdev_lag_upper_info lag_upper_info;
	struct net_device *hsr_dev;
	struct hsr_port *master;
	int res;

	/* Don't use promiscuous mode for offload since L2 frame forward
	 * happens at the offloaded hardware.
	 */
	if (!port->hsr->fwd_offloaded) {
		res = dev_set_promiscuity(dev, 1);
		if (res)
			return res;
	}

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	hsr_dev = master->dev;

	/* Present the slave as a broadcast-type LAG member under the HSR
	 * master device.
	 */
	lag_upper_info.tx_type = NETDEV_LAG_TX_TYPE_BROADCAST;
	lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
	res = netdev_master_upper_dev_link(dev, hsr_dev, NULL, &lag_upper_info, extack);
	if (res)
		goto fail_upper_dev_link;

	/* From here on, frames received on @dev are steered through
	 * hsr_handle_frame() with @port as context.
	 */
	res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
	if (res)
		goto fail_rx_handler;
	/* Disable LRO so the HSR code sees individual frames — presumably
	 * coalesced super-frames would break per-frame tag handling.
	 */
	dev_disable_lro(dev);

	return 0;

	/* Error unwind, in reverse order of acquisition. */
fail_rx_handler:
	netdev_upper_dev_unlink(dev, hsr_dev);
fail_upper_dev_link:
	if (!port->hsr->fwd_offloaded)
		dev_set_promiscuity(dev, -1);

	return res;
}
184 
/* Create an hsr_port of @type for @dev and attach it to @hsr.  For
 * non-master ports the device is first validated and then wired into the
 * bridge via hsr_portdev_setup().  Returns 0 on success or a negative
 * errno; on failure no port is left attached and nothing is leaked.
 */
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
		 enum hsr_port_type type, struct netlink_ext_ack *extack)
{
	struct hsr_port *port, *master;
	int res;

	/* The slave-device sanity checks do not apply to the HSR master,
	 * which is our own virtual device.
	 */
	if (type != HSR_PT_MASTER) {
		res = hsr_check_dev_ok(dev, extack);
		if (res)
			return res;
	}

	port = hsr_port_get_hsr(hsr, type);
	if (port)
		return -EBUSY;	/* This port already exists */

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->hsr = hsr;
	port->dev = dev;
	port->type = type;
	/* Remember the device's MAC so hsr_del_port() can restore it. */
	ether_addr_copy(port->original_macaddress, dev->dev_addr);

	if (type != HSR_PT_MASTER) {
		res = hsr_portdev_setup(hsr, dev, port, extack);
		if (res)
			goto fail_dev_setup;
	}

	list_add_tail_rcu(&port->port_list, &hsr->ports);

	/* The master's features and MTU are derived from the slave set;
	 * recompute them now that the port list changed.
	 */
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_update_features(master->dev);
	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));

	return 0;

fail_dev_setup:
	kfree(port);
	return res;
}
228 
/* Detach @port from its bridge and free it.  For slave/interlink ports
 * this undoes hsr_portdev_setup(): the rx_handler is unregistered,
 * promiscuity dropped, the upper-dev link removed and the original MAC
 * address restored.  The port struct is freed only after an RCU grace
 * period, since RCU readers may still be walking the ports list.
 */
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	list_del_rcu(&port->port_list);

	if (port != master) {
		/* Master features/MTU depend on the remaining slaves. */
		netdev_update_features(master->dev);
		dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		netdev_rx_handler_unregister(port->dev);
		if (!port->hsr->fwd_offloaded)
			dev_set_promiscuity(port->dev, -1);
		netdev_upper_dev_unlink(port->dev, master->dev);
		/* Undo any MAC change made while the device was enslaved
		 * (saved by hsr_add_port()).
		 */
		eth_hw_addr_set(port->dev, port->original_macaddress);
	}

	kfree_rcu(port, rcu);
}
250