// SPDX-License-Identifier: GPL-2.0-only
/*
 * vxcan.c - Virtual CAN Tunnel for cross namespace communication
 *
 * This code is derived from drivers/net/can/vcan.c for the virtual CAN
 * specific parts and from drivers/net/veth.c to implement the netlink API
 * for network interface pairs in a common and established way.
 *
 * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
 */
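
/*
 * Typical usage (a sketch, assuming an iproute2 build with vxcan support
 * and a peer namespace created beforehand; all names are examples only):
 *
 *   ip netns add peerns
 *   ip link add vxcan0 type vxcan peer name vxcan1 netns peerns
 *   ip link set vxcan0 up
 *   ip netns exec peerns ip link set vxcan1 up
 *
 * CAN frames sent on vxcan0 then show up on vxcan1 in the peer namespace
 * and vice versa.
 */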

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/vxcan.h>
#include <linux/can/can-ml.h>
#include <linux/slab.h>
#include <net/can.h>
#include <net/rtnetlink.h>

#define DRV_NAME "vxcan"

MODULE_DESCRIPTION("Virtual CAN Tunnel");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);

struct vxcan_priv {
	struct net_device __rcu	*peer;
};

static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;
	struct can_skb_ext *csx;
	struct sk_buff *skb;
	unsigned int len;

	if (can_dropped_invalid_skb(dev, oskb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		kfree_skb(oskb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	skb_tx_timestamp(oskb);

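	/* Hand the frame over as a private clone so that the metadata
	 * rewrite below does not touch the original skb. consume_skb()
	 * releases the original without counting it as a drop, while the
	 * error paths use kfree_skb() to signal that the frame was lost.
	 */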
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (skb) {
		consume_skb(oskb);
	} else {
		kfree_skb(oskb);
		goto out_unlock;
	}

	/* The cloned skb points to the skb extension of oskb with an
	 * increased refcount. skb_ext_add() creates a private copy to
	 * separate the skb extension data, which is needed to start with
	 * a fresh can_gw_hops counter in the other namespace.
	 */
	csx = skb_ext_add(skb, SKB_EXT_CAN);
	if (!csx) {
		kfree_skb(skb);
		goto out_unlock;
	}

	/* reset CAN GW hop counter */
	csx->can_gw_hops = 0;
	skb->pkt_type   = PACKET_BROADCAST;
	skb->dev        = peer;
	skb->ip_summed  = CHECKSUM_UNNECESSARY;

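	/* Snapshot the data length before netif_rx(): the skb must not be
	 * touched once it has been handed to the peer's receive path. The
	 * packet and byte counters on both sides are only bumped when the
	 * frame was actually accepted.
	 */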
	len = can_skb_get_data_len(skb);
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

static int vxcan_open(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	if (!peer)
		return -ENOTCONN;

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int vxcan_close(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}

static int vxcan_get_iflink(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? READ_ONCE(peer->ifindex) : 0;
	rcu_read_unlock();

	return iflink;
}

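/* Derive the announced CAN capabilities from the configured MTU: Classical
 * CAN is always supported, CAN FD once the MTU exceeds CAN_MTU, and CAN XL
 * once it reaches CANXL_MIN_MTU.
 */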
static void vxcan_set_cap_info(struct net_device *dev)
{
	u32 can_cap = CAN_CAP_CC;

	if (dev->mtu > CAN_MTU)
		can_cap |= CAN_CAP_FD;

	if (dev->mtu >= CANXL_MIN_MTU)
		can_cap |= CAN_CAP_XL;

	can_set_cap(dev, can_cap);
}

static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Do not allow changing the MTU while running */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
	    !can_is_canxl_dev_mtu(new_mtu))
		return -EINVAL;

	WRITE_ONCE(dev->mtu, new_mtu);
	vxcan_set_cap_info(dev);
	return 0;
}

static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open	= vxcan_open,
	.ndo_stop	= vxcan_close,
	.ndo_start_xmit	= vxcan_xmit,
	.ndo_get_iflink	= vxcan_get_iflink,
	.ndo_change_mtu = vxcan_change_mtu,
};

static const struct ethtool_ops vxcan_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};

static void vxcan_setup(struct net_device *dev)
{
	struct can_ml_priv *can_ml;

	dev->type		= ARPHRD_CAN;
	dev->mtu		= CANXL_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &vxcan_netdev_ops;
	dev->ethtool_ops	= &vxcan_ethtool_ops;
	dev->needs_free_netdev	= true;

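	/* struct can_ml_priv is laid out directly behind the (aligned)
	 * vxcan_priv area of netdev_priv(); this must match the .priv_size
	 * calculation in vxcan_link_ops below.
	 */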
	can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
	can_set_ml_priv(dev, can_ml);
	vxcan_set_cap_info(dev);
}

/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;

static int vxcan_newlink(struct net_device *dev,
			 struct rtnl_newlink_params *params,
			 struct netlink_ext_ack *extack)
{
	struct net *peer_net = rtnl_newlink_peer_net(params);
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	struct vxcan_priv *priv;
	struct net_device *peer;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

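	/* A vxcan link always comes as a pair: first create and register
	 * the peer device (possibly in another namespace selected via the
	 * VXCAN_INFO_PEER attributes), then register the local device, and
	 * finally cross link the two through their RCU protected peer
	 * pointers.
	 */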
	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer = data[VXCAN_INFO_PEER];

		ifmp = nla_data(nla_peer);
		rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
		tbp = peer_tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp, 0, NULL);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	unregister_netdevice(peer);
	return err;
}

static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note: dellink() is called from default_device_exit_batch(),
	 * before a synchronize_rcu() point. The devices are guaranteed
	 * not to be freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *vxcan_get_link_net(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

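/* The VXCAN_INFO_PEER attribute carries an ifinfomsg plus attributes for
 * the peer device; .peer_type lets the rtnetlink core locate it, e.g. to
 * resolve the peer's target namespace (see rtnl_newlink_peer_net() in
 * vxcan_newlink() above).
 */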
static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.peer_type	= VXCAN_INFO_PEER,
	.maxtype	= VXCAN_INFO_MAX,
	.get_link_net	= vxcan_get_link_net,
};

static __init int vxcan_init(void)
{
	pr_info("vxcan: Virtual CAN Tunnel driver\n");

	return rtnl_link_register(&vxcan_link_ops);
}

static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}

module_init(vxcan_init);
module_exit(vxcan_exit);