// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */

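/* Charge one received packet and its bytes to this device's per-CPU
 * counters. The u64_stats sequence counter keeps the 64-bit counters
 * consistent for readers on 32-bit systems.
 */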
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

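/* Charge @len transmitted bytes and one packet to the per-CPU counters.
 * rmnet_vnd_tx_fixup() below is the skb-based convenience wrapper.
 */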
void rmnet_vnd_tx_fixup_len(unsigned int len, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	rmnet_vnd_tx_fixup_len(skb->len, dev);
}

/* Network Device Operations */

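/* ndo_start_xmit handler. If the device is attached to a real device,
 * hand the skb to the rmnet egress handler; otherwise count a TX drop
 * and free the skb. The skb is consumed on both paths, so this always
 * returns NETDEV_TX_OK.
 */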
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

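/* Worst-case egress headroom: the MAP header, plus the uplink checksum
 * header when MAPv4 checksum offload is enabled on egress.
 */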
static int rmnet_vnd_headroom(struct rmnet_port *port)
{
	u32 headroom;

	headroom = sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		headroom += sizeof(struct rmnet_map_ul_csum_header);

	return headroom;
}

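/* The new MTU must not exceed RMNET_MAX_PACKET_SIZE and must leave room
 * for the MAP headroom within the real device's MTU. Runs under RTNL,
 * which rmnet_get_port_rtnl() requires.
 */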
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct rmnet_port *port;
	u32 headroom;

	port = rmnet_get_port_rtnl(priv->real_dev);

	headroom = rmnet_vnd_headroom(port);

	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE ||
	    new_mtu > (priv->real_dev->mtu - headroom))
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

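/* ndo_get_iflink handler: report the lower (real) device's ifindex. */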
static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

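/* ndo_init handler: allocate the per-CPU stats block and the GRO cells.
 * Both are released again in rmnet_vnd_uninit().
 */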
static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);
}

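/* Fold the per-CPU counters into @s. Each CPU's snapshot is re-read if
 * a concurrent writer updated it mid-copy (u64_stats retry loop).
 */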
static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats = { };
	struct rmnet_pcpu_stats *pcpu_ptr;
	struct rmnet_vnd_stats snapshot;
	unsigned int cpu, start;

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin(&pcpu_ptr->syncp);
			snapshot = pcpu_ptr->stats;	/* struct assignment */
		} while (u64_stats_fetch_retry(&pcpu_ptr->syncp, start));

		total_stats.rx_pkts += snapshot.rx_pkts;
		total_stats.rx_bytes += snapshot.rx_bytes;
		total_stats.tx_pkts += snapshot.tx_pkts;
		total_stats.tx_bytes += snapshot.tx_bytes;
		total_stats.tx_drops += snapshot.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave  = rmnet_add_bridge,
	.ndo_del_slave  = rmnet_del_bridge,
	.ndo_init       = rmnet_vnd_init,
	.ndo_uninit     = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};

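/* Ethtool stat strings. The entries must stay in the same order as the
 * u64 fields of struct rmnet_priv_stats, since rmnet_get_ethtool_stats()
 * copies that struct out verbatim.
 */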
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum ok",
	"Bad IPv4 header checksum",
	"Checksum valid bit not set",
	"Checksum validation failed",
	"Checksum error bad buffer",
	"Checksum error bad ip version",
	"Checksum error bad transport",
	"Checksum skipped on ip fragment",
	"Checksum skipped",
	"Checksum computed in software",
	"Checksum computed in hardware",
};

static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &rmnet_gstrings_stats,
		       sizeof(rmnet_gstrings_stats));
		break;
	}
}

static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rmnet_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

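/* Copy the private checksum counters out as the ethtool stats array. */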
static void rmnet_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_priv_stats *st = &priv->stats;

	if (!data)
		return;

	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}

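/* Report the current uplink (egress) aggregation limits. The aggregation
 * timer is stored in nanoseconds internally and reported to ethtool in
 * microseconds.
 */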
static int rmnet_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_port *port;

	port = rmnet_get_port_rtnl(priv->real_dev);

	memset(kernel_coal, 0, sizeof(*kernel_coal));
	kernel_coal->tx_aggr_max_bytes = port->egress_agg_params.bytes;
	kernel_coal->tx_aggr_max_frames = port->egress_agg_params.count;
	kernel_coal->tx_aggr_time_usecs = div_u64(port->egress_agg_params.time_nsec,
						  NSEC_PER_USEC);

	return 0;
}

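/* Apply new uplink aggregation limits: at most 64 frames and 32768 bytes
 * per aggregate. With a recent ethtool these should map to the tx-aggr-*
 * coalesce parameters, e.g. (assuming an rmnet0 device):
 *
 *   ethtool -C rmnet0 tx-aggr-max-frames 10 tx-aggr-max-bytes 16384
 */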
static int rmnet_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_port *port;

	port = rmnet_get_port_rtnl(priv->real_dev);

	if (kernel_coal->tx_aggr_max_frames < 1 || kernel_coal->tx_aggr_max_frames > 64)
		return -EINVAL;

	if (kernel_coal->tx_aggr_max_bytes > 32768)
		return -EINVAL;

	rmnet_map_update_ul_agg_config(port, kernel_coal->tx_aggr_max_bytes,
				       kernel_coal->tx_aggr_max_frames,
				       kernel_coal->tx_aggr_time_usecs);

	return 0;
}

static const struct ethtool_ops rmnet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_TX_AGGR,
	.get_coalesce = rmnet_get_coalesce,
	.set_coalesce = rmnet_set_coalesce,
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
};

/* Called by the kernel whenever a new rmnet<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc.
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	eth_hw_addr_random(rmnet_dev);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;  /* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;

	rmnet_dev->features |= NETIF_F_LLTX;

	/* This perm addr will be used as interface identifier by IPv6 */
	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(rmnet_dev->perm_addr);
}

/* Exposed API */

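/* Create and register the rmnet device for MUX ID @id on top of
 * @real_dev. Fails with -EBUSY if the MUX ID is already in use, or
 * -EINVAL if the initial MTU (real device MTU minus MAP headroom) is
 * not acceptable.
 */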
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep,
		      struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	u32 headroom;
	int rc;

	if (rmnet_get_endpoint(port, id)) {
		NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
		return -EBUSY;
	}

	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;

	priv->real_dev = real_dev;

	headroom = rmnet_vnd_headroom(port);

	if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
		return -EINVAL;
	}

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv->mux_id = id;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}

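/* Detach the endpoint for MUX ID @id from @port. Unregistering the
 * netdevice itself is left to the caller.
 */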
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

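/* Stop or wake the device's TX queue in response to MAP flow control
 * commands.
 */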
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect a similar number of enable/disable
	 * commands, optimize for the disable, which is more
	 * latency sensitive than the enable.
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}

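/* Check that every muxed endpoint's MTU still fits within @real_dev's
 * MTU minus the MAP headroom; returns -1 as soon as one does not.
 */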
int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev)
{
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	unsigned long bkt_ep;
	u32 headroom;

	port = rmnet_get_port_rtnl(real_dev);

	headroom = rmnet_vnd_headroom(port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		if (ep->egress_dev->mtu > (real_dev->mtu - headroom))
			return -1;
	}

	return 0;
}

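/* Shrink any endpoint device whose MTU no longer fits within
 * @real_dev's MTU minus the MAP headroom.
 */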
int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
			     struct net_device *real_dev)
{
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	unsigned long bkt_ep;
	u32 headroom;

	headroom = rmnet_vnd_headroom(port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		if (ep->egress_dev->mtu <= (real_dev->mtu - headroom))
			continue;

		if (rmnet_vnd_change_mtu(ep->egress_dev,
					 real_dev->mtu - headroom))
			return -1;
	}

	return 0;
}
410