// SPDX-License-Identifier: GPL-2.0-only
/*
 * vxcan.c - Virtual CAN Tunnel for cross namespace communication
 *
 * This code is derived from drivers/net/can/vcan.c for the virtual CAN
 * specific parts and from drivers/net/veth.c to implement the netlink API
 * for network interface pairs in a common and established way.
 *
 * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/vxcan.h>
#include <linux/can/can-ml.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>

#define DRV_NAME "vxcan"

MODULE_DESCRIPTION("Virtual CAN Tunnel");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);

struct vxcan_priv {
	struct net_device __rcu	*peer;
};

static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;
	struct sk_buff *skb;
	unsigned int len;

	if (can_dropped_invalid_skb(dev, oskb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		kfree_skb(oskb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	skb_tx_timestamp(oskb);

	skb = skb_clone(oskb, GFP_ATOMIC);
	if (skb) {
		consume_skb(oskb);
	} else {
		kfree_skb(oskb);
		goto out_unlock;
	}

	/* reset CAN GW hop counter */
	skb->csum_start = 0;
	skb->pkt_type   = PACKET_BROADCAST;
	skb->dev        = peer;
	skb->ip_summed  = CHECKSUM_UNNECESSARY;

	len = can_skb_get_data_len(skb);
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

static int vxcan_open(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	if (!peer)
		return -ENOTCONN;

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int vxcan_close(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}

static int vxcan_get_iflink(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? READ_ONCE(peer->ifindex) : 0;
	rcu_read_unlock();

	return iflink;
}

static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Do not allow changing the MTU while running */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
	    !can_is_canxl_dev_mtu(new_mtu))
		return -EINVAL;

	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}

static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open	= vxcan_open,
	.ndo_stop	= vxcan_close,
	.ndo_start_xmit	= vxcan_xmit,
	.ndo_get_iflink	= vxcan_get_iflink,
	.ndo_change_mtu	= vxcan_change_mtu,
};

static const struct ethtool_ops vxcan_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};

static void vxcan_setup(struct net_device *dev)
{
	struct can_ml_priv *can_ml;

	dev->type		= ARPHRD_CAN;
	dev->mtu		= CANFD_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &vxcan_netdev_ops;
	dev->ethtool_ops	= &vxcan_ethtool_ops;
	dev->needs_free_netdev	= true;

	can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
	can_set_ml_priv(dev, can_ml);
}

/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;

static int vxcan_newlink(struct net *peer_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer = data[VXCAN_INFO_PEER];

		ifmp = nla_data(nla_peer);
		rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
		tbp = peer_tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp, 0, NULL);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	unregister_netdevice(peer);
	return err;
}

static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note: dellink() is called from default_device_exit_batch(),
	 * before a synchronize_rcu() point. The devices are guaranteed
	 * not to be freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *vxcan_get_link_net(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) +
			  sizeof(struct can_ml_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.peer_type	= VXCAN_INFO_PEER,
	.maxtype	= VXCAN_INFO_MAX,
	.get_link_net	= vxcan_get_link_net,
};

static __init int vxcan_init(void)
{
	pr_info("vxcan: Virtual CAN Tunnel driver\n");

	return rtnl_link_register(&vxcan_link_ops);
}

static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}

module_init(vxcan_init);
module_exit(vxcan_exit);
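
/*
 * Illustrative usage sketch (not part of the driver): how the rtnl_link_ops
 * registered above is typically exercised from user space to tunnel CAN
 * frames into another network namespace. This assumes an iproute2 build
 * with vxcan and netns support plus the can-utils tools; device and
 * namespace names below are arbitrary examples.
 *
 *   ip netns add rx-ns
 *   ip link add vxcan0 type vxcan peer name vxcan1 netns rx-ns
 *   ip link set vxcan0 up
 *   ip -n rx-ns link set vxcan1 up
 *
 *   # frames written to vxcan0 appear on vxcan1 inside rx-ns
 *   ip netns exec rx-ns candump vxcan1 &
 *   cangen vxcan0
 */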