/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */

void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}

void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
}

/* Network Device Operations */

/* Hand the packet to the rmnet egress handler; drop it if this virtual
 * device has not been attached to a real underlying device yet.
 */
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		dev->stats.tx_dropped++;
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

/* New MTU must be non-negative and fit within the maximum rmnet packet size */
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave = rmnet_add_bridge,
	.ndo_del_slave = rmnet_del_bridge,
};

/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc...
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	random_ether_addr(rmnet_dev->dev_addr);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;  /* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
}
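
/* Example (illustrative): rmnet_vnd_setup() is hooked to userspace through
 * the rmnet rtnl link type registered in rmnet_config.c, so a device using
 * these ops is typically created with something like:
 *
 *	ip link add link wwan0 name rmnet0 type rmnet mux_id 1
 *
 * "wwan0" and the mux_id value are placeholders, not names fixed by this
 * file.
 */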

/* Exposed API */

int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep)
{
	struct rmnet_priv *priv;
	int rc;

	if (ep->egress_dev)
		return -EINVAL;

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv = netdev_priv(rmnet_dev);
		priv->mux_id = id;
		priv->real_dev = real_dev;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}

int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(rmnet_dev);
	return priv->mux_id;
}

int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect similar number of enable/disable
	 * commands, optimize for the disable. That is more
	 * latency sensitive than enable
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}
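
/* Call-path sketch (illustrative): the flow control helper above is
 * expected to be driven by MAP flow enable/disable commands parsed in
 * rmnet_map_command.c, roughly:
 *
 *	rmnet_vnd_do_flow_control(ep->egress_dev, enable);
 *
 * where "ep" is the endpoint looked up from the command's mux ID.
 */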