// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */

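/* Bump the per-cpu RX packet and byte counters for an skb arriving on
 * this virtual device. Only the local CPU's counters are written, under
 * the u64_stats sequence counter.
 */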
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

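/* Bump the per-cpu TX packet and byte counters; the egress-side mirror
 * of rmnet_vnd_rx_fixup().
 */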
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */

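/* Hand the skb to the rmnet egress handler for MAP encapsulation and
 * transmission on the real device. Without an attached real device the
 * packet can only be dropped and counted in tx_drops.
 */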
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

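/* Raw IP devices have no L2 header, so any MTU from 0 up to
 * RMNET_MAX_PACKET_SIZE is accepted as-is.
 */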
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

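/* Report the underlying real device as this interface's link. */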
static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

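/* ndo_init: allocate per-cpu stats and initialize the GRO cells used to
 * feed received packets to the stack. gro_cells_init() failure unwinds
 * the stats allocation.
 */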
static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

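/* ndo_uninit: tear down the GRO cells and per-cpu stats allocated in
 * rmnet_vnd_init().
 */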
static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);
}

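/* Fold the per-cpu counters into an rtnl_link_stats64 snapshot. tx_drops
 * is summed outside the fetch/retry loop because it is updated with
 * this_cpu_inc() rather than under the syncp sequence counter.
 */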
static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats;
	struct rmnet_pcpu_stats *pcpu_ptr;
	unsigned int cpu, start;

	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave  = rmnet_add_bridge,
	.ndo_del_slave  = rmnet_del_bridge,
	.ndo_init       = rmnet_vnd_init,
	.ndo_uninit     = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};

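/* ethtool stat names; the order must match the u64 layout of
 * struct rmnet_priv_stats, which rmnet_get_ethtool_stats() copies out
 * wholesale.
 */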
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum ok",
	"Checksum valid bit not set",
	"Checksum validation failed",
	"Checksum error bad buffer",
	"Checksum error bad ip version",
	"Checksum error bad transport",
	"Checksum skipped on ip fragment",
	"Checksum skipped",
	"Checksum computed in software",
};

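/* ethtool: export the stat names for ETH_SS_STATS. */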
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &rmnet_gstrings_stats,
		       sizeof(rmnet_gstrings_stats));
		break;
	}
}

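/* ethtool: report how many stats are exposed for a given string set. */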
static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rmnet_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

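/* ethtool: copy the checksum counters out of priv->stats, one u64 per
 * string in rmnet_gstrings_stats.
 */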
static void rmnet_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_priv_stats *st = &priv->stats;

	if (!data)
		return;

	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}

static const struct ethtool_ops rmnet_ethtool_ops = {
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
};

/* Called by the kernel whenever a new rmnet<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, and so on.
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	eth_random_addr(rmnet_dev->dev_addr);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;  /* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;

	rmnet_dev->features |= NETIF_F_LLTX;

	/* This perm addr will be used as interface identifier by IPv6 */
	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(rmnet_dev->perm_addr);
}

/* Exposed API */

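/* Register a new rmnet device with MUX ID @id on top of @real_dev and
 * record it in @ep. From userspace this corresponds to something like
 * (interface names here are illustrative):
 *
 *   ip link add link wwan0 name rmnet0 type rmnet mux_id 1
 */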
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep,
		      struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	int rc;

	if (rmnet_get_endpoint(port, id)) {
		NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
		return -EBUSY;
	}

	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;

	priv->real_dev = real_dev;

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv->mux_id = id;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}

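/* Detach the endpoint for MUX ID @id. The caller is responsible for
 * unregistering the netdev itself.
 */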
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

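/* Pause or resume this device's TX queue in response to flow control
 * commands received from the underlying transport.
 */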
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect similar numbers of enable and disable
	 * commands, optimize for the disable path: it is more
	 * latency sensitive than enable.
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}