xref: /linux/arch/um/drivers/vector_kern.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2017 - 2019 Cambridge Greys Limited
4  * Copyright (C) 2011 - 2014 Cisco Systems Inc
5  * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
6  * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
7  * James Leu (jleu@mindspring.net).
8  * Copyright (C) 2001 by various other people who didn't put their name here.
9  */
10 
11 #include <linux/memblock.h>
12 #include <linux/etherdevice.h>
13 #include <linux/ethtool.h>
14 #include <linux/inetdevice.h>
15 #include <linux/init.h>
16 #include <linux/list.h>
17 #include <linux/netdevice.h>
18 #include <linux/platform_device.h>
19 #include <linux/rtnetlink.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/interrupt.h>
23 #include <linux/firmware.h>
24 #include <linux/fs.h>
25 #include <uapi/linux/filter.h>
26 #include <init.h>
27 #include <irq_kern.h>
28 #include <irq_user.h>
29 #include <net_kern.h>
30 #include <os.h>
31 #include "mconsole_kern.h"
32 #include "vector_user.h"
33 #include "vector_kern.h"
34 
35 /*
36  * Adapted from network devices with the following major changes:
37  * All transports are static - simplifies the code significantly
38  * Multiple FDs/IRQs per device
39  * Vector IO optionally used for read/write, falling back to legacy
40  * based on configuration and/or availability
41  * Configuration is no longer positional - L2TPv3 and GRE require up to
42  * 10 parameters; passing these positionally is not fit for purpose.
43  * Only socket transports are supported
44  */
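/* Illustrative configuration example (a sketch, not taken from this file):
 * assuming the raw transport is spelled "raw" on the command line (the
 * literal transport names live in vector_user.c), a device could be set up
 * using only keys parsed below as
 *
 *	vec0:transport=raw,mac=52:54:00:12:34:56,mtu=1500,depth=128,gro=1,vec=1
 *
 * where mtu, depth, gro and vec map onto get_mtu(), get_depth(),
 * get_req_size() and get_transport_options() respectively.
 */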
45 
46 
47 #define DRIVER_NAME "uml-vector"
48 struct vector_cmd_line_arg {
49 	struct list_head list;
50 	int unit;
51 	char *arguments;
52 };
53 
54 struct vector_device {
55 	struct list_head list;
56 	struct net_device *dev;
57 	struct platform_device pdev;
58 	int unit;
59 	int opened;
60 };
61 
62 static LIST_HEAD(vec_cmd_line);
63 
64 static DEFINE_SPINLOCK(vector_devices_lock);
65 static LIST_HEAD(vector_devices);
66 
67 static int driver_registered;
68 
69 static void vector_eth_configure(int n, struct arglist *def);
70 static int vector_mmsg_rx(struct vector_private *vp, int budget);
71 
72 /* Argument accessors to set variables (and/or set default values):
73  * mtu, buffer sizing, default headroom, etc.
74  */
75 
76 #define DEFAULT_HEADROOM 2
77 #define SAFETY_MARGIN 32
78 #define DEFAULT_VECTOR_SIZE 64
79 #define TX_SMALL_PACKET 128
80 #define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
81 
82 static const struct {
83 	const char string[ETH_GSTRING_LEN];
84 } ethtool_stats_keys[] = {
85 	{ "rx_queue_max" },
86 	{ "rx_queue_running_average" },
87 	{ "tx_queue_max" },
88 	{ "tx_queue_running_average" },
89 	{ "rx_encaps_errors" },
90 	{ "tx_timeout_count" },
91 	{ "tx_restart_queue" },
92 	{ "tx_kicks" },
93 	{ "tx_flow_control_xon" },
94 	{ "tx_flow_control_xoff" },
95 	{ "rx_csum_offload_good" },
96 	{ "rx_csum_offload_errors"},
97 	{ "sg_ok"},
98 	{ "sg_linearized"},
99 };
100 
101 #define VECTOR_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
102 
103 static void vector_reset_stats(struct vector_private *vp)
104 {
105 	vp->estats.rx_queue_max = 0;
106 	vp->estats.rx_queue_running_average = 0;
107 	vp->estats.tx_queue_max = 0;
108 	vp->estats.tx_queue_running_average = 0;
109 	vp->estats.rx_encaps_errors = 0;
110 	vp->estats.tx_timeout_count = 0;
111 	vp->estats.tx_restart_queue = 0;
112 	vp->estats.tx_kicks = 0;
113 	vp->estats.tx_flow_control_xon = 0;
114 	vp->estats.tx_flow_control_xoff = 0;
115 	vp->estats.sg_ok = 0;
116 	vp->estats.sg_linearized = 0;
117 }
118 
119 static int get_mtu(struct arglist *def)
120 {
121 	char *mtu = uml_vector_fetch_arg(def, "mtu");
122 	long result;
123 
124 	if (mtu != NULL) {
125 		if (kstrtoul(mtu, 10, &result) == 0)
126 			if ((result < (1 << 16) - 1) && (result >= 576))
127 				return result;
128 	}
129 	return ETH_MAX_PACKET;
130 }
131 
132 static char *get_bpf_file(struct arglist *def)
133 {
134 	return uml_vector_fetch_arg(def, "bpffile");
135 }
136 
137 static bool get_bpf_flash(struct arglist *def)
138 {
139 	char *allow = uml_vector_fetch_arg(def, "bpfflash");
140 	long result;
141 
142 	if (allow != NULL) {
143 		if (kstrtoul(allow, 10, &result) == 0)
144 			return result > 0;
145 	}
146 	return false;
147 }
148 
149 static int get_depth(struct arglist *def)
150 {
151 	char *mtu = uml_vector_fetch_arg(def, "depth");
152 	long result;
153 
154 	if (mtu != NULL) {
155 		if (kstrtoul(mtu, 10, &result) == 0)
156 			return result;
157 	}
158 	return DEFAULT_VECTOR_SIZE;
159 }
160 
161 static int get_headroom(struct arglist *def)
162 {
163 	char *mtu = uml_vector_fetch_arg(def, "headroom");
164 	long result;
165 
166 	if (mtu != NULL) {
167 		if (kstrtoul(mtu, 10, &result) == 0)
168 			return result;
169 	}
170 	return DEFAULT_HEADROOM;
171 }
172 
173 static int get_req_size(struct arglist *def)
174 {
175 	char *gro = uml_vector_fetch_arg(def, "gro");
176 	long result;
177 
178 	if (gro != NULL) {
179 		if (kstrtoul(gro, 10, &result) == 0) {
180 			if (result > 0)
181 				return 65536;
182 		}
183 	}
184 	return get_mtu(def) + ETH_HEADER_OTHER +
185 		get_headroom(def) + SAFETY_MARGIN;
186 }
187 
188 
189 static int get_transport_options(struct arglist *def)
190 {
191 	char *transport = uml_vector_fetch_arg(def, "transport");
192 	char *vector = uml_vector_fetch_arg(def, "vec");
193 
194 	int vec_rx = VECTOR_RX;
195 	int vec_tx = VECTOR_TX;
196 	long parsed;
197 	int result = 0;
198 
199 	if (transport == NULL)
200 		return -EINVAL;
201 
202 	if (vector != NULL) {
203 		if (kstrtoul(vector, 10, &parsed) == 0) {
204 			if (parsed == 0) {
205 				vec_rx = 0;
206 				vec_tx = 0;
207 			}
208 		}
209 	}
210 
211 	if (get_bpf_flash(def))
212 		result = VECTOR_BPF_FLASH;
213 
214 	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
215 		return result;
216 	if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
217 		return (result | vec_rx | VECTOR_BPF);
218 	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
219 		return (result | vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
220 	return (result | vec_rx | vec_tx);
221 }
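/* Example of the resulting option mask (derived from the checks above):
 * a TRANS_RAW transport with "vec" unset or non-zero yields
 * VECTOR_RX | VECTOR_TX | VECTOR_QDISC_BYPASS (plus VECTOR_BPF_FLASH when
 * bpfflash=1), while a TRANS_TAP transport keeps only the optional
 * VECTOR_BPF_FLASH bit and therefore stays in legacy read/write mode.
 */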
222 
223 
224 /* A mini-buffer for packet drop read
225  * All of our supported transports are datagram oriented and we always
226  * read using recvmsg or recvmmsg. If we pass a buffer which is smaller
227  * than the packet size it still counts as a full packet read and will
228  * clean the incoming stream to keep sigio/epoll happy
229  */
230 
231 #define DROP_BUFFER_SIZE 32
232 
233 static char *drop_buffer;
234 
235 /* Array backed queues optimized for bulk enqueue/dequeue and
236  * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios.
237  * For more details and full design rationale see
238  * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt
239  */
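/* Index arithmetic sketch for the helpers below: head and tail advance
 * modulo max_depth, so with max_depth = 64, head = 62 and advance = 4 the
 * new head is (62 + 4) % 64 = 2. When queue_depth reaches zero both
 * indices are reset to 0 so the next burst can use one contiguous,
 * maximum-sized vector.
 */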
240 
241 
242 /*
243  * Advance the mmsg queue head by n = advance. Resets the queue to
244  * maximum enqueue/dequeue-at-once capacity if possible. Called by
245  * dequeuers. Caller must hold the head_lock!
246  */
247 
248 static int vector_advancehead(struct vector_queue *qi, int advance)
249 {
250 	int queue_depth;
251 
252 	qi->head =
253 		(qi->head + advance)
254 			% qi->max_depth;
255 
256 
257 	spin_lock(&qi->tail_lock);
258 	qi->queue_depth -= advance;
259 
260 	/* we are at 0, use this to
261 	 * reset head and tail so we can use max size vectors
262 	 */
263 
264 	if (qi->queue_depth == 0) {
265 		qi->head = 0;
266 		qi->tail = 0;
267 	}
268 	queue_depth = qi->queue_depth;
269 	spin_unlock(&qi->tail_lock);
270 	return queue_depth;
271 }
272 
273 /*	Advance the queue tail by n = advance.
274  *	This is called by enqueuers, which must already hold the
275  *	tail lock
276  */
277 
278 static int vector_advancetail(struct vector_queue *qi, int advance)
279 {
280 	int queue_depth;
281 
282 	qi->tail =
283 		(qi->tail + advance)
284 			% qi->max_depth;
285 	spin_lock(&qi->head_lock);
286 	qi->queue_depth += advance;
287 	queue_depth = qi->queue_depth;
288 	spin_unlock(&qi->head_lock);
289 	return queue_depth;
290 }
291 
292 static int prep_msg(struct vector_private *vp,
293 	struct sk_buff *skb,
294 	struct iovec *iov)
295 {
296 	int iov_index = 0;
297 	int nr_frags, frag;
298 	skb_frag_t *skb_frag;
299 
300 	nr_frags = skb_shinfo(skb)->nr_frags;
301 	if (nr_frags > MAX_IOV_SIZE) {
302 		if (skb_linearize(skb) != 0)
303 			goto drop;
304 	}
305 	if (vp->header_size > 0) {
306 		iov[iov_index].iov_len = vp->header_size;
307 		vp->form_header(iov[iov_index].iov_base, skb, vp);
308 		iov_index++;
309 	}
310 	iov[iov_index].iov_base = skb->data;
311 	if (nr_frags > 0) {
312 		iov[iov_index].iov_len = skb->len - skb->data_len;
313 		vp->estats.sg_ok++;
314 	} else
315 		iov[iov_index].iov_len = skb->len;
316 	iov_index++;
317 	for (frag = 0; frag < nr_frags; frag++) {
318 		skb_frag = &skb_shinfo(skb)->frags[frag];
319 		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
320 		iov[iov_index].iov_len = skb_frag_size(skb_frag);
321 		iov_index++;
322 	}
323 	return iov_index;
324 drop:
325 	return -1;
326 }
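/* The iovec filled in by prep_msg() is laid out as
 *	[ optional transport header | skb linear data | frag 0 .. frag N ]
 * so a single sendmmsg()/writev() slot can carry a scatter-gather skb
 * without copying it into one buffer.
 */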
327 /*
328  * Generic vector enqueue with support for forming headers using transport
329  * specific callback. Allows GRE, L2TPv3, RAW and other transports
330  * to use a common enqueue procedure in vector mode
331  */
332 
333 static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
334 {
335 	struct vector_private *vp = netdev_priv(qi->dev);
336 	int queue_depth;
337 	int packet_len;
338 	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
339 	int iov_count;
340 
341 	spin_lock(&qi->tail_lock);
342 	spin_lock(&qi->head_lock);
343 	queue_depth = qi->queue_depth;
344 	spin_unlock(&qi->head_lock);
345 
346 	if (skb)
347 		packet_len = skb->len;
348 
349 	if (queue_depth < qi->max_depth) {
350 
351 		*(qi->skbuff_vector + qi->tail) = skb;
352 		mmsg_vector += qi->tail;
353 		iov_count = prep_msg(
354 			vp,
355 			skb,
356 			mmsg_vector->msg_hdr.msg_iov
357 		);
358 		if (iov_count < 1)
359 			goto drop;
360 		mmsg_vector->msg_hdr.msg_iovlen = iov_count;
361 		mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
362 		mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
363 		queue_depth = vector_advancetail(qi, 1);
364 	} else
365 		goto drop;
366 	spin_unlock(&qi->tail_lock);
367 	return queue_depth;
368 drop:
369 	qi->dev->stats.tx_dropped++;
370 	if (skb != NULL) {
371 		packet_len = skb->len;
372 		dev_consume_skb_any(skb);
373 		netdev_completed_queue(qi->dev, 1, packet_len);
374 	}
375 	spin_unlock(&qi->tail_lock);
376 	return queue_depth;
377 }
378 
379 static int consume_vector_skbs(struct vector_queue *qi, int count)
380 {
381 	struct sk_buff *skb;
382 	int skb_index;
383 	int bytes_compl = 0;
384 
385 	for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
386 		skb = *(qi->skbuff_vector + skb_index);
387 		/* mark as empty to ensure correct destruction if
388 		 * needed
389 		 */
390 		bytes_compl += skb->len;
391 		*(qi->skbuff_vector + skb_index) = NULL;
392 		dev_consume_skb_any(skb);
393 	}
394 	qi->dev->stats.tx_bytes += bytes_compl;
395 	qi->dev->stats.tx_packets += count;
396 	netdev_completed_queue(qi->dev, count, bytes_compl);
397 	return vector_advancehead(qi, count);
398 }
399 
400 /*
401  * Generic vector dequeue via sendmmsg with support for forming headers
402  * using transport specific callback. Allows GRE, L2TPv3, RAW and
403  * other transports to use a common dequeue procedure in vector mode
404  */
405 
406 
407 static int vector_send(struct vector_queue *qi)
408 {
409 	struct vector_private *vp = netdev_priv(qi->dev);
410 	struct mmsghdr *send_from;
411 	int result = 0, send_len, queue_depth = qi->max_depth;
412 
413 	if (spin_trylock(&qi->head_lock)) {
414 		if (spin_trylock(&qi->tail_lock)) {
415 			/* update queue_depth to current value */
416 			queue_depth = qi->queue_depth;
417 			spin_unlock(&qi->tail_lock);
418 			while (queue_depth > 0) {
419 				/* Calculate the start of the vector */
420 				send_len = queue_depth;
421 				send_from = qi->mmsg_vector;
422 				send_from += qi->head;
423 				/* Adjust vector size if wraparound */
424 				if (send_len + qi->head > qi->max_depth)
425 					send_len = qi->max_depth - qi->head;
426 				/* Try to TX as many packets as possible */
427 				if (send_len > 0) {
428 					result = uml_vector_sendmmsg(
429 						 vp->fds->tx_fd,
430 						 send_from,
431 						 send_len,
432 						 0
433 					);
434 					vp->in_write_poll =
435 						(result != send_len);
436 				}
437 				/* For some of the sendmmsg error scenarios
438 				 * we may end up being unsure of the TX success
439 				 * of all packets. It is safer to declare
440 				 * them all TX-ed and blame the network.
441 				 */
442 				if (result < 0) {
443 					if (net_ratelimit())
444 						netdev_err(vp->dev, "sendmmsg err=%i\n",
445 							result);
446 					vp->in_error = true;
447 					result = send_len;
448 				}
449 				if (result > 0) {
450 					queue_depth =
451 						consume_vector_skbs(qi, result);
452 					/* This is equivalent to a TX IRQ.
453 					 * Restart the upper layers to feed us
454 					 * more packets.
455 					 */
456 					if (result > vp->estats.tx_queue_max)
457 						vp->estats.tx_queue_max = result;
458 					vp->estats.tx_queue_running_average =
459 						(vp->estats.tx_queue_running_average + result) >> 1;
460 				}
461 				netif_wake_queue(qi->dev);
462 				/* if TX is busy, break out of the send loop;
463 				 * the write poll IRQ will reschedule xmit for us
464 				 */
465 				if (result != send_len) {
466 					vp->estats.tx_restart_queue++;
467 					break;
468 				}
469 			}
470 		}
471 		spin_unlock(&qi->head_lock);
472 	}
473 	return queue_depth;
474 }
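/* Note on vector_send(): send_len is clamped so one sendmmsg() call never
 * crosses the ring wraparound; a short send sets in_write_poll and breaks
 * out of the loop, leaving the write poll IRQ to reschedule transmission
 * of the remainder (counted in tx_restart_queue).
 */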
475 
476 /* Queue destructor. Deliberately stateless so we can use
477  * it in queue cleanup if initialization fails.
478  */
479 
480 static void destroy_queue(struct vector_queue *qi)
481 {
482 	int i;
483 	struct iovec *iov;
484 	struct vector_private *vp = netdev_priv(qi->dev);
485 	struct mmsghdr *mmsg_vector;
486 
487 	if (qi == NULL)
488 		return;
489 	/* deallocate any skbuffs - we rely on any unused to be
490 	 * set to NULL.
491 	 */
492 	if (qi->skbuff_vector != NULL) {
493 		for (i = 0; i < qi->max_depth; i++) {
494 			if (*(qi->skbuff_vector + i) != NULL)
495 				dev_kfree_skb_any(*(qi->skbuff_vector + i));
496 		}
497 		kfree(qi->skbuff_vector);
498 	}
499 	/* deallocate matching IOV structures including header buffs */
500 	if (qi->mmsg_vector != NULL) {
501 		mmsg_vector = qi->mmsg_vector;
502 		for (i = 0; i < qi->max_depth; i++) {
503 			iov = mmsg_vector->msg_hdr.msg_iov;
504 			if (iov != NULL) {
505 				if ((vp->header_size > 0) &&
506 					(iov->iov_base != NULL))
507 					kfree(iov->iov_base);
508 				kfree(iov);
509 			}
510 			mmsg_vector++;
511 		}
512 		kfree(qi->mmsg_vector);
513 	}
514 	kfree(qi);
515 }
516 
517 /*
518  * Queue constructor. Create a queue with a given size.
519  */
520 static struct vector_queue *create_queue(
521 	struct vector_private *vp,
522 	int max_size,
523 	int header_size,
524 	int num_extra_frags)
525 {
526 	struct vector_queue *result;
527 	int i;
528 	struct iovec *iov;
529 	struct mmsghdr *mmsg_vector;
530 
531 	result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
532 	if (result == NULL)
533 		return NULL;
534 	result->max_depth = max_size;
535 	result->dev = vp->dev;
536 	result->mmsg_vector = kmalloc(
537 		(sizeof(struct mmsghdr) * max_size), GFP_KERNEL);
538 	if (result->mmsg_vector == NULL)
539 		goto out_mmsg_fail;
540 	result->skbuff_vector = kmalloc(
541 		(sizeof(void *) * max_size), GFP_KERNEL);
542 	if (result->skbuff_vector == NULL)
543 		goto out_skb_fail;
544 
545 	/* further failures can be handled safely by destroy_queue */
546 
547 	mmsg_vector = result->mmsg_vector;
548 	for (i = 0; i < max_size; i++) {
549 		/* Clear all pointers - we use non-NULL as marking on
550 		 * what to free on destruction
551 		 */
552 		*(result->skbuff_vector + i) = NULL;
553 		mmsg_vector->msg_hdr.msg_iov = NULL;
554 		mmsg_vector++;
555 	}
556 	mmsg_vector = result->mmsg_vector;
557 	result->max_iov_frags = num_extra_frags;
558 	for (i = 0; i < max_size; i++) {
559 		if (vp->header_size > 0)
560 			iov = kmalloc_array(3 + num_extra_frags,
561 					    sizeof(struct iovec),
562 					    GFP_KERNEL
563 			);
564 		else
565 			iov = kmalloc_array(2 + num_extra_frags,
566 					    sizeof(struct iovec),
567 					    GFP_KERNEL
568 			);
569 		if (iov == NULL)
570 			goto out_fail;
571 		mmsg_vector->msg_hdr.msg_iov = iov;
572 		mmsg_vector->msg_hdr.msg_iovlen = 1;
573 		mmsg_vector->msg_hdr.msg_control = NULL;
574 		mmsg_vector->msg_hdr.msg_controllen = 0;
575 		mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT;
576 		mmsg_vector->msg_hdr.msg_name = NULL;
577 		mmsg_vector->msg_hdr.msg_namelen = 0;
578 		if (vp->header_size > 0) {
579 			iov->iov_base = kmalloc(header_size, GFP_KERNEL);
580 			if (iov->iov_base == NULL)
581 				goto out_fail;
582 			iov->iov_len = header_size;
583 			mmsg_vector->msg_hdr.msg_iovlen = 2;
584 			iov++;
585 		}
586 		iov->iov_base = NULL;
587 		iov->iov_len = 0;
588 		mmsg_vector++;
589 	}
590 	spin_lock_init(&result->head_lock);
591 	spin_lock_init(&result->tail_lock);
592 	result->queue_depth = 0;
593 	result->head = 0;
594 	result->tail = 0;
595 	return result;
596 out_skb_fail:
597 	kfree(result->mmsg_vector);
598 out_mmsg_fail:
599 	kfree(result);
600 	return NULL;
601 out_fail:
602 	destroy_queue(result);
603 	return NULL;
604 }
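/* Per-slot iovec layout produced by create_queue() (unused slots keep
 * msg_iov == NULL so destroy_queue() knows what to free):
 *	with a transport header:	iov[0] = header buffer,
 *					iov[1] = linear data,
 *					iov[2..] = page fragments
 *	without a header:		iov[0] = linear data, iov[1..] = fragments
 * The data pointers themselves are filled in later by prep_msg()/prep_skb().
 */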
605 
606 /*
607  * We do not use the RX queue as a proper wraparound queue for now.
608  * This is not necessary because the consumption via napi_gro_receive()
609  * happens in-line. While we can try using the return code of
610  * netif_rx() for flow control, there are no drivers doing this today.
611  * For this RX specific use we ignore the tail/head locks and
612  * just read into a prepared queue filled with skbuffs.
613  */
614 
615 static struct sk_buff *prep_skb(
616 	struct vector_private *vp,
617 	struct user_msghdr *msg)
618 {
619 	int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
620 	struct sk_buff *result;
621 	int iov_index = 0, len;
622 	struct iovec *iov = msg->msg_iov;
623 	int err, nr_frags, frag;
624 	skb_frag_t *skb_frag;
625 
626 	if (vp->req_size <= linear)
627 		len = linear;
628 	else
629 		len = vp->req_size;
630 	result = alloc_skb_with_frags(
631 		linear,
632 		len - vp->max_packet,
633 		3,
634 		&err,
635 		GFP_ATOMIC
636 	);
637 	if (vp->header_size > 0)
638 		iov_index++;
639 	if (result == NULL) {
640 		iov[iov_index].iov_base = NULL;
641 		iov[iov_index].iov_len = 0;
642 		goto done;
643 	}
644 	skb_reserve(result, vp->headroom);
645 	result->dev = vp->dev;
646 	skb_put(result, vp->max_packet);
647 	result->data_len = len - vp->max_packet;
648 	result->len += len - vp->max_packet;
649 	skb_reset_mac_header(result);
650 	result->ip_summed = CHECKSUM_NONE;
651 	iov[iov_index].iov_base = result->data;
652 	iov[iov_index].iov_len = vp->max_packet;
653 	iov_index++;
654 
655 	nr_frags = skb_shinfo(result)->nr_frags;
656 	for (frag = 0; frag < nr_frags; frag++) {
657 		skb_frag = &skb_shinfo(result)->frags[frag];
658 		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
659 		if (iov[iov_index].iov_base != NULL)
660 			iov[iov_index].iov_len = skb_frag_size(skb_frag);
661 		else
662 			iov[iov_index].iov_len = 0;
663 		iov_index++;
664 	}
665 done:
666 	msg->msg_iovlen = iov_index;
667 	return result;
668 }
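/* Sketch of the receive buffer prep_skb() builds: the linear part covers
 * max_packet bytes (after reserving headroom), the rest of req_size
 * (65536 when GRO is enabled) lives in page fragments allocated up to
 * order 3, and the iovec entries mirror that layout so recvmsg()/recvmmsg()
 * writes directly into the skb.
 */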
669 
670 
671 /* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs*/
672 
673 static void prep_queue_for_rx(struct vector_queue *qi)
674 {
675 	struct vector_private *vp = netdev_priv(qi->dev);
676 	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
677 	void **skbuff_vector = qi->skbuff_vector;
678 	int i;
679 
680 	if (qi->queue_depth == 0)
681 		return;
682 	for (i = 0; i < qi->queue_depth; i++) {
683 		/* it is OK if allocation fails - recvmmsg with NULL data in
684 		 * iov argument still performs an RX, it just drops the packet.
685 		 * This allows us to stop faffing around with a "drop buffer"
686 		 */
687 
688 		*skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
689 		skbuff_vector++;
690 		mmsg_vector++;
691 	}
692 	qi->queue_depth = 0;
693 }
694 
695 static struct vector_device *find_device(int n)
696 {
697 	struct vector_device *device;
698 	struct list_head *ele;
699 
700 	spin_lock(&vector_devices_lock);
701 	list_for_each(ele, &vector_devices) {
702 		device = list_entry(ele, struct vector_device, list);
703 		if (device->unit == n)
704 			goto out;
705 	}
706 	device = NULL;
707  out:
708 	spin_unlock(&vector_devices_lock);
709 	return device;
710 }
711 
712 static int vector_parse(char *str, int *index_out, char **str_out,
713 			char **error_out)
714 {
715 	int n, err;
716 	char *start = str;
717 
718 	while ((*str != ':') && (strlen(str) > 1))
719 		str++;
720 	if (*str != ':') {
721 		*error_out = "Expected ':' after device number";
722 		return -EINVAL;
723 	}
724 	*str = '\0';
725 
726 	err = kstrtouint(start, 0, &n);
727 	if (err < 0) {
728 		*error_out = "Bad device number";
729 		return err;
730 	}
731 
732 	str++;
733 	if (find_device(n)) {
734 		*error_out = "Device already configured";
735 		return -EINVAL;
736 	}
737 
738 	*index_out = n;
739 	*str_out = str;
740 	return 0;
741 }
742 
743 static int vector_config(char *str, char **error_out)
744 {
745 	int err, n;
746 	char *params;
747 	struct arglist *parsed;
748 
749 	err = vector_parse(str, &n, &params, error_out);
750 	if (err != 0)
751 		return err;
752 
753 	/* This string is broken up and the pieces used by the underlying
754 	 * driver. We should copy it to make sure things do not go wrong
755 	 * later.
756 	 */
757 
758 	params = kstrdup(params, GFP_KERNEL);
759 	if (params == NULL) {
760 		*error_out = "vector_config failed to strdup string";
761 		return -ENOMEM;
762 	}
763 
764 	parsed = uml_parse_vector_ifspec(params);
765 
766 	if (parsed == NULL) {
767 		*error_out = "vector_config failed to parse parameters";
768 		kfree(params);
769 		return -EINVAL;
770 	}
771 
772 	vector_eth_configure(n, parsed);
773 	return 0;
774 }
775 
776 static int vector_id(char **str, int *start_out, int *end_out)
777 {
778 	char *end;
779 	int n;
780 
781 	n = simple_strtoul(*str, &end, 0);
782 	if ((*end != '\0') || (end == *str))
783 		return -1;
784 
785 	*start_out = n;
786 	*end_out = n;
787 	*str = end;
788 	return n;
789 }
790 
791 static int vector_remove(int n, char **error_out)
792 {
793 	struct vector_device *vec_d;
794 	struct net_device *dev;
795 	struct vector_private *vp;
796 
797 	vec_d = find_device(n);
798 	if (vec_d == NULL)
799 		return -ENODEV;
800 	dev = vec_d->dev;
801 	vp = netdev_priv(dev);
802 	if (vp->fds != NULL)
803 		return -EBUSY;
804 	unregister_netdev(dev);
805 	platform_device_unregister(&vec_d->pdev);
806 	return 0;
807 }
808 
809 /*
810  * There is no shared per-transport initialization code, so
811  * we will just initialize each interface one by one and
812  * add them to a list
813  */
814 
815 static struct platform_driver uml_net_driver = {
816 	.driver = {
817 		.name = DRIVER_NAME,
818 	},
819 };
820 
821 
822 static void vector_device_release(struct device *dev)
823 {
824 	struct vector_device *device = dev_get_drvdata(dev);
825 	struct net_device *netdev = device->dev;
826 
827 	list_del(&device->list);
828 	kfree(device);
829 	free_netdev(netdev);
830 }
831 
832 /* Bog standard recv using recvmsg - not used normally unless the user
833  * explicitly specifies not to use recvmmsg vector RX.
834  */
835 
836 static int vector_legacy_rx(struct vector_private *vp)
837 {
838 	int pkt_len;
839 	struct user_msghdr hdr;
840 	struct iovec iov[2 + MAX_IOV_SIZE]; /* header + data use case only */
841 	int iovpos = 0;
842 	struct sk_buff *skb;
843 	int header_check;
844 
845 	hdr.msg_name = NULL;
846 	hdr.msg_namelen = 0;
847 	hdr.msg_iov = (struct iovec *) &iov;
848 	hdr.msg_control = NULL;
849 	hdr.msg_controllen = 0;
850 	hdr.msg_flags = 0;
851 
852 	if (vp->header_size > 0) {
853 		iov[0].iov_base = vp->header_rxbuffer;
854 		iov[0].iov_len = vp->header_size;
855 	}
856 
857 	skb = prep_skb(vp, &hdr);
858 
859 	if (skb == NULL) {
860 		/* Read a packet into drop_buffer and don't do
861 		 * anything with it.
862 		 */
863 		iov[iovpos].iov_base = drop_buffer;
864 		iov[iovpos].iov_len = DROP_BUFFER_SIZE;
865 		hdr.msg_iovlen = 1;
866 		vp->dev->stats.rx_dropped++;
867 	}
868 
869 	pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);
870 	if (pkt_len < 0) {
871 		vp->in_error = true;
872 		return pkt_len;
873 	}
874 
875 	if (skb != NULL) {
876 		if (pkt_len > vp->header_size) {
877 			if (vp->header_size > 0) {
878 				header_check = vp->verify_header(
879 					vp->header_rxbuffer, skb, vp);
880 				if (header_check < 0) {
881 					dev_kfree_skb_irq(skb);
882 					vp->dev->stats.rx_dropped++;
883 					vp->estats.rx_encaps_errors++;
884 					return 0;
885 				}
886 				if (header_check > 0) {
887 					vp->estats.rx_csum_offload_good++;
888 					skb->ip_summed = CHECKSUM_UNNECESSARY;
889 				}
890 			}
891 			pskb_trim(skb, pkt_len - vp->rx_header_size);
892 			skb->protocol = eth_type_trans(skb, skb->dev);
893 			vp->dev->stats.rx_bytes += skb->len;
894 			vp->dev->stats.rx_packets++;
895 			napi_gro_receive(&vp->napi, skb);
896 		} else {
897 			dev_kfree_skb_irq(skb);
898 		}
899 	}
900 	return pkt_len;
901 }
902 
903 /*
904  * Packet at a time TX which falls back to vector TX if the
905  * underlying transport is busy.
906  */
907 
908 
909 
910 static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
911 {
912 	struct iovec iov[3 + MAX_IOV_SIZE];
913 	int iov_count, pkt_len = 0;
914 
915 	iov[0].iov_base = vp->header_txbuffer;
916 	iov_count = prep_msg(vp, skb, (struct iovec *) &iov);
917 
918 	if (iov_count < 1)
919 		goto drop;
920 
921 	pkt_len = uml_vector_writev(
922 		vp->fds->tx_fd,
923 		(struct iovec *) &iov,
924 		iov_count
925 	);
926 
927 	if (pkt_len < 0)
928 		goto drop;
929 
930 	netif_trans_update(vp->dev);
931 	netif_wake_queue(vp->dev);
932 
933 	if (pkt_len > 0) {
934 		vp->dev->stats.tx_bytes += skb->len;
935 		vp->dev->stats.tx_packets++;
936 	} else {
937 		vp->dev->stats.tx_dropped++;
938 	}
939 	consume_skb(skb);
940 	return pkt_len;
941 drop:
942 	vp->dev->stats.tx_dropped++;
943 	consume_skb(skb);
944 	if (pkt_len < 0)
945 		vp->in_error = true;
946 	return pkt_len;
947 }
948 
949 /*
950  * Receive as many messages as we can in one call using the special
951  * mmsg vector matched to an skb vector which we prepared earlier.
952  */
953 
954 static int vector_mmsg_rx(struct vector_private *vp, int budget)
955 {
956 	int packet_count, i;
957 	struct vector_queue *qi = vp->rx_queue;
958 	struct sk_buff *skb;
959 	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
960 	void **skbuff_vector = qi->skbuff_vector;
961 	int header_check;
962 
963 	/* Refresh the vector and make sure it is filled with new skbs and the
964 	 * iovs are updated to point to them.
965 	 */
966 
967 	prep_queue_for_rx(qi);
968 
969 	/* Fire the Lazy Gun - get as many packets as we can in one go. */
970 
971 	if (budget > qi->max_depth)
972 		budget = qi->max_depth;
973 
974 	packet_count = uml_vector_recvmmsg(
975 		vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
976 
977 	if (packet_count < 0)
978 		vp->in_error = true;
979 
980 	if (packet_count <= 0)
981 		return packet_count;
982 
983 	/* We treat packet processing as enqueue and buffer refresh as dequeue.
984 	 * The queue_depth tells us how many buffers have been used and how
985 	 * many we need to prep the next time prep_queue_for_rx() is called.
986 	 */
987 
988 	qi->queue_depth = packet_count;
989 
990 	for (i = 0; i < packet_count; i++) {
991 		skb = (*skbuff_vector);
992 		if (mmsg_vector->msg_len > vp->header_size) {
993 			if (vp->header_size > 0) {
994 				header_check = vp->verify_header(
995 					mmsg_vector->msg_hdr.msg_iov->iov_base,
996 					skb,
997 					vp
998 				);
999 				if (header_check < 0) {
1000 				/* Overlay header failed to verify - discard.
1001 				 * We can actually keep this skb and reuse it,
1002 				 * but that will make the prep logic too
1003 				 * complex.
1004 				 */
1005 					dev_kfree_skb_irq(skb);
1006 					vp->estats.rx_encaps_errors++;
1007 					continue;
1008 				}
1009 				if (header_check > 0) {
1010 					vp->estats.rx_csum_offload_good++;
1011 					skb->ip_summed = CHECKSUM_UNNECESSARY;
1012 				}
1013 			}
1014 			pskb_trim(skb,
1015 				mmsg_vector->msg_len - vp->rx_header_size);
1016 			skb->protocol = eth_type_trans(skb, skb->dev);
1017 			/*
1018 			 * We do not need to lock on updating stats here
1019 			 * The interrupt loop is non-reentrant.
1020 			 */
1021 			vp->dev->stats.rx_bytes += skb->len;
1022 			vp->dev->stats.rx_packets++;
1023 			napi_gro_receive(&vp->napi, skb);
1024 		} else {
1025 			/* Overlay header too short to do anything - discard.
1026 			 * We can actually keep this skb and reuse it,
1027 			 * but that will make the prep logic too complex.
1028 			 */
1029 			if (skb != NULL)
1030 				dev_kfree_skb_irq(skb);
1031 		}
1032 		(*skbuff_vector) = NULL;
1033 		/* Move to the next buffer element */
1034 		mmsg_vector++;
1035 		skbuff_vector++;
1036 	}
1037 	if (packet_count > 0) {
1038 		if (vp->estats.rx_queue_max < packet_count)
1039 			vp->estats.rx_queue_max = packet_count;
1040 		vp->estats.rx_queue_running_average =
1041 			(vp->estats.rx_queue_running_average + packet_count) >> 1;
1042 	}
1043 	return packet_count;
1044 }
1045 
1046 static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
1047 {
1048 	struct vector_private *vp = netdev_priv(dev);
1049 	int queue_depth = 0;
1050 
1051 	if (vp->in_error) {
1052 		deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
1053 		if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
1054 			deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
1055 		return NETDEV_TX_BUSY;
1056 	}
1057 
1058 	if ((vp->options & VECTOR_TX) == 0) {
1059 		writev_tx(vp, skb);
1060 		return NETDEV_TX_OK;
1061 	}
1062 
1063 	/* We do BQL only in the vector path; there is no point doing it in
1064 	 * packet-at-a-time mode as there is no device queue
1065 	 */
1066 
1067 	netdev_sent_queue(vp->dev, skb->len);
1068 	queue_depth = vector_enqueue(vp->tx_queue, skb);
1069 
1070 	if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
1071 		mod_timer(&vp->tl, vp->coalesce);
1072 		return NETDEV_TX_OK;
1073 	} else {
1074 		queue_depth = vector_send(vp->tx_queue);
1075 		if (queue_depth > 0)
1076 			napi_schedule(&vp->napi);
1077 	}
1078 
1079 	return NETDEV_TX_OK;
1080 }
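/* Transmit path summary for vector_net_start_xmit(): without VECTOR_TX each
 * skb is written out immediately via writev_tx(); with VECTOR_TX the skb is
 * enqueued and, when netdev_xmit_more() hints at further packets and the
 * ring still has room, the sendmmsg() flush is deferred to the coalesce
 * timer, otherwise vector_send() runs now and NAPI is scheduled if packets
 * remain queued.
 */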
1081 
1082 static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
1083 {
1084 	struct net_device *dev = dev_id;
1085 	struct vector_private *vp = netdev_priv(dev);
1086 
1087 	if (!netif_running(dev))
1088 		return IRQ_NONE;
1089 	napi_schedule(&vp->napi);
1090 	return IRQ_HANDLED;
1091 
1092 }
1093 
1094 static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
1095 {
1096 	struct net_device *dev = dev_id;
1097 	struct vector_private *vp = netdev_priv(dev);
1098 
1099 	if (!netif_running(dev))
1100 		return IRQ_NONE;
1101 	/* We need to pay attention to it only if we got
1102 	 * -EAGAIN or -ENOBUFS from sendmmsg. Otherwise
1103 	 * we ignore it. In the future, it may be worth
1104 	 * it to improve the IRQ controller a bit to make
1105 	 * tweaking the IRQ mask less costly
1106 	 */
1107 
1108 	napi_schedule(&vp->napi);
1109 	return IRQ_HANDLED;
1110 
1111 }
1112 
1113 static int irq_rr;
1114 
1115 static int vector_net_close(struct net_device *dev)
1116 {
1117 	struct vector_private *vp = netdev_priv(dev);
1118 
1119 	netif_stop_queue(dev);
1120 	del_timer(&vp->tl);
1121 
1122 	vp->opened = false;
1123 
1124 	if (vp->fds == NULL)
1125 		return 0;
1126 
1127 	/* Disable and free all IRQS */
1128 	if (vp->rx_irq > 0) {
1129 		um_free_irq(vp->rx_irq, dev);
1130 		vp->rx_irq = 0;
1131 	}
1132 	if (vp->tx_irq > 0) {
1133 		um_free_irq(vp->tx_irq, dev);
1134 		vp->tx_irq = 0;
1135 	}
1136 	napi_disable(&vp->napi);
1137 	netif_napi_del(&vp->napi);
1138 	if (vp->fds->rx_fd > 0) {
1139 		if (vp->bpf)
1140 			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
1141 		os_close_file(vp->fds->rx_fd);
1142 		vp->fds->rx_fd = -1;
1143 	}
1144 	if (vp->fds->tx_fd > 0) {
1145 		os_close_file(vp->fds->tx_fd);
1146 		vp->fds->tx_fd = -1;
1147 	}
1148 	if (vp->bpf != NULL)
1149 		kfree(vp->bpf->filter);
1150 	kfree(vp->bpf);
1151 	vp->bpf = NULL;
1152 	kfree(vp->fds->remote_addr);
1153 	kfree(vp->transport_data);
1154 	kfree(vp->header_rxbuffer);
1155 	kfree(vp->header_txbuffer);
1156 	if (vp->rx_queue != NULL)
1157 		destroy_queue(vp->rx_queue);
1158 	if (vp->tx_queue != NULL)
1159 		destroy_queue(vp->tx_queue);
1160 	kfree(vp->fds);
1161 	vp->fds = NULL;
1162 	vp->in_error = false;
1163 	return 0;
1164 }
1165 
1166 static int vector_poll(struct napi_struct *napi, int budget)
1167 {
1168 	struct vector_private *vp = container_of(napi, struct vector_private, napi);
1169 	int work_done = 0;
1170 	int err;
1171 	bool tx_enqueued = false;
1172 
1173 	if ((vp->options & VECTOR_TX) != 0)
1174 		tx_enqueued = (vector_send(vp->tx_queue) > 0);
1175 	if ((vp->options & VECTOR_RX) > 0)
1176 		err = vector_mmsg_rx(vp, budget);
1177 	else {
1178 		err = vector_legacy_rx(vp);
1179 		if (err > 0)
1180 			err = 1;
1181 	}
1182 	if (err > 0)
1183 		work_done += err;
1184 
1185 	if (tx_enqueued || err > 0)
1186 		napi_schedule(napi);
1187 	if (work_done < budget)
1188 		napi_complete_done(napi, work_done);
1189 	return work_done;
1190 }
1191 
1192 static void vector_reset_tx(struct work_struct *work)
1193 {
1194 	struct vector_private *vp =
1195 		container_of(work, struct vector_private, reset_tx);
1196 	netdev_reset_queue(vp->dev);
1197 	netif_start_queue(vp->dev);
1198 	netif_wake_queue(vp->dev);
1199 }
1200 
1201 static int vector_net_open(struct net_device *dev)
1202 {
1203 	struct vector_private *vp = netdev_priv(dev);
1204 	int err = -EINVAL;
1205 	struct vector_device *vdevice;
1206 
1207 	if (vp->opened)
1208 		return -ENXIO;
1209 	vp->opened = true;
1210 
1211 	vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));
1212 
1213 	vp->fds = uml_vector_user_open(vp->unit, vp->parsed);
1214 
1215 	if (vp->fds == NULL)
1216 		goto out_close;
1217 
1218 	if (build_transport_data(vp) < 0)
1219 		goto out_close;
1220 
1221 	if ((vp->options & VECTOR_RX) > 0) {
1222 		vp->rx_queue = create_queue(
1223 			vp,
1224 			get_depth(vp->parsed),
1225 			vp->rx_header_size,
1226 			MAX_IOV_SIZE
1227 		);
1228 		vp->rx_queue->queue_depth = get_depth(vp->parsed);
1229 	} else {
1230 		vp->header_rxbuffer = kmalloc(
1231 			vp->rx_header_size,
1232 			GFP_KERNEL
1233 		);
1234 		if (vp->header_rxbuffer == NULL)
1235 			goto out_close;
1236 	}
1237 	if ((vp->options & VECTOR_TX) > 0) {
1238 		vp->tx_queue = create_queue(
1239 			vp,
1240 			get_depth(vp->parsed),
1241 			vp->header_size,
1242 			MAX_IOV_SIZE
1243 		);
1244 	} else {
1245 		vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
1246 		if (vp->header_txbuffer == NULL)
1247 			goto out_close;
1248 	}
1249 
1250 	netif_napi_add_weight(vp->dev, &vp->napi, vector_poll,
1251 			      get_depth(vp->parsed));
1252 	napi_enable(&vp->napi);
1253 
1254 	/* READ IRQ */
1255 	err = um_request_irq(
1256 		irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
1257 			IRQ_READ, vector_rx_interrupt,
1258 			IRQF_SHARED, dev->name, dev);
1259 	if (err < 0) {
1260 		netdev_err(dev, "vector_open: failed to get rx irq(%d)\n", err);
1261 		err = -ENETUNREACH;
1262 		goto out_close;
1263 	}
1264 	vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
1265 	dev->irq = irq_rr + VECTOR_BASE_IRQ;
1266 	irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
1267 
1268 	/* WRITE IRQ - we need it only if we have vector TX */
1269 	if ((vp->options & VECTOR_TX) > 0) {
1270 		err = um_request_irq(
1271 			irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
1272 				IRQ_WRITE, vector_tx_interrupt,
1273 				IRQF_SHARED, dev->name, dev);
1274 		if (err < 0) {
1275 			netdev_err(dev,
1276 				"vector_open: failed to get tx irq(%d)\n", err);
1277 			err = -ENETUNREACH;
1278 			goto out_close;
1279 		}
1280 		vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
1281 		irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
1282 	}
1283 
1284 	if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
1285 		if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
1286 			vp->options |= VECTOR_BPF;
1287 	}
1288 	if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL))
1289 		vp->bpf = uml_vector_default_bpf(dev->dev_addr);
1290 
1291 	if (vp->bpf != NULL)
1292 		uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
1293 
1294 	netif_start_queue(dev);
1295 	vector_reset_stats(vp);
1296 
1297 	/* clear buffer - it can happen that the host side of the interface
1298 	 * is full when we get here. In this case, new data is never queued,
1299 	 * SIGIOs never arrive, and the net never works.
1300 	 */
1301 
1302 	napi_schedule(&vp->napi);
1303 
1304 	vdevice = find_device(vp->unit);
1305 	vdevice->opened = 1;
1306 
1307 	if ((vp->options & VECTOR_TX) != 0)
1308 		add_timer(&vp->tl);
1309 	return 0;
1310 out_close:
1311 	vector_net_close(dev);
1312 	return err;
1313 }
1314 
1315 
1316 static void vector_net_set_multicast_list(struct net_device *dev)
1317 {
1318 	/* TODO: - we can do some BPF games here */
1319 	return;
1320 }
1321 
1322 static void vector_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
1323 {
1324 	struct vector_private *vp = netdev_priv(dev);
1325 
1326 	vp->estats.tx_timeout_count++;
1327 	netif_trans_update(dev);
1328 	schedule_work(&vp->reset_tx);
1329 }
1330 
1331 static netdev_features_t vector_fix_features(struct net_device *dev,
1332 	netdev_features_t features)
1333 {
1334 	features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
1335 	return features;
1336 }
1337 
1338 static int vector_set_features(struct net_device *dev,
1339 	netdev_features_t features)
1340 {
1341 	struct vector_private *vp = netdev_priv(dev);
1342 	/* Adjust buffer sizes for GSO/GRO. Unfortunately, there is
1343 	 * no way to negotiate it on raw sockets, so we can change
1344 	 * only our side.
1345 	 */
1346 	if (features & NETIF_F_GRO)
1347 		/* All new frame buffers will be GRO-sized */
1348 		vp->req_size = 65536;
1349 	else
1350 		/* All new frame buffers will be normal sized */
1351 		vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
1352 	return 0;
1353 }
1354 
1355 #ifdef CONFIG_NET_POLL_CONTROLLER
1356 static void vector_net_poll_controller(struct net_device *dev)
1357 {
1358 	disable_irq(dev->irq);
1359 	vector_rx_interrupt(dev->irq, dev);
1360 	enable_irq(dev->irq);
1361 }
1362 #endif
1363 
1364 static void vector_net_get_drvinfo(struct net_device *dev,
1365 				struct ethtool_drvinfo *info)
1366 {
1367 	strscpy(info->driver, DRIVER_NAME);
1368 }
1369 
1370 static int vector_net_load_bpf_flash(struct net_device *dev,
1371 				struct ethtool_flash *efl)
1372 {
1373 	struct vector_private *vp = netdev_priv(dev);
1374 	struct vector_device *vdevice;
1375 	const struct firmware *fw;
1376 	int result = 0;
1377 
1378 	if (!(vp->options & VECTOR_BPF_FLASH)) {
1379 		netdev_err(dev, "loading firmware not permitted: %s\n", efl->data);
1380 		return -1;
1381 	}
1382 
1383 	if (vp->bpf != NULL) {
1384 		if (vp->opened)
1385 			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
1386 		kfree(vp->bpf->filter);
1387 		vp->bpf->filter = NULL;
1388 	} else {
1389 		vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
1390 		if (vp->bpf == NULL) {
1391 			netdev_err(dev, "failed to allocate memory for firmware\n");
1392 			goto flash_fail;
1393 		}
1394 	}
1395 
1396 	vdevice = find_device(vp->unit);
1397 
1398 	if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
1399 		goto flash_fail;
1400 
1401 	vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
1402 	if (!vp->bpf->filter)
1403 		goto free_buffer;
1404 
1405 	vp->bpf->len = fw->size / sizeof(struct sock_filter);
1406 	release_firmware(fw);
1407 
1408 	if (vp->opened)
1409 		result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
1410 
1411 	return result;
1412 
1413 free_buffer:
1414 	release_firmware(fw);
1415 
1416 flash_fail:
1417 	if (vp->bpf != NULL)
1418 		kfree(vp->bpf->filter);
1419 	kfree(vp->bpf);
1420 	vp->bpf = NULL;
1421 	return -1;
1422 }
1423 
1424 static void vector_get_ringparam(struct net_device *netdev,
1425 				 struct ethtool_ringparam *ring,
1426 				 struct kernel_ethtool_ringparam *kernel_ring,
1427 				 struct netlink_ext_ack *extack)
1428 {
1429 	struct vector_private *vp = netdev_priv(netdev);
1430 
1431 	ring->rx_max_pending = vp->rx_queue->max_depth;
1432 	ring->tx_max_pending = vp->tx_queue->max_depth;
1433 	ring->rx_pending = vp->rx_queue->max_depth;
1434 	ring->tx_pending = vp->tx_queue->max_depth;
1435 }
1436 
1437 static void vector_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1438 {
1439 	switch (stringset) {
1440 	case ETH_SS_TEST:
1441 		*buf = '\0';
1442 		break;
1443 	case ETH_SS_STATS:
1444 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1445 		break;
1446 	default:
1447 		WARN_ON(1);
1448 		break;
1449 	}
1450 }
1451 
1452 static int vector_get_sset_count(struct net_device *dev, int sset)
1453 {
1454 	switch (sset) {
1455 	case ETH_SS_TEST:
1456 		return 0;
1457 	case ETH_SS_STATS:
1458 		return VECTOR_NUM_STATS;
1459 	default:
1460 		return -EOPNOTSUPP;
1461 	}
1462 }
1463 
1464 static void vector_get_ethtool_stats(struct net_device *dev,
1465 	struct ethtool_stats *estats,
1466 	u64 *tmp_stats)
1467 {
1468 	struct vector_private *vp = netdev_priv(dev);
1469 
1470 	memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
1471 }
1472 
1473 static int vector_get_coalesce(struct net_device *netdev,
1474 			       struct ethtool_coalesce *ec,
1475 			       struct kernel_ethtool_coalesce *kernel_coal,
1476 			       struct netlink_ext_ack *extack)
1477 {
1478 	struct vector_private *vp = netdev_priv(netdev);
1479 
1480 	ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
1481 	return 0;
1482 }
1483 
1484 static int vector_set_coalesce(struct net_device *netdev,
1485 			       struct ethtool_coalesce *ec,
1486 			       struct kernel_ethtool_coalesce *kernel_coal,
1487 			       struct netlink_ext_ack *extack)
1488 {
1489 	struct vector_private *vp = netdev_priv(netdev);
1490 
1491 	vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
1492 	if (vp->coalesce == 0)
1493 		vp->coalesce = 1;
1494 	return 0;
1495 }
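/* Worked example of the conversion above (illustrative): with HZ=100,
 * "ethtool -C vecN tx-usecs 20000" stores (20000 * 100) / 1000000 = 2
 * jiffies in vp->coalesce, and anything below one jiffy is rounded up
 * to 1 so the TX coalesce timer always fires.
 */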
1496 
1497 static const struct ethtool_ops vector_net_ethtool_ops = {
1498 	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS,
1499 	.get_drvinfo	= vector_net_get_drvinfo,
1500 	.get_link	= ethtool_op_get_link,
1501 	.get_ts_info	= ethtool_op_get_ts_info,
1502 	.get_ringparam	= vector_get_ringparam,
1503 	.get_strings	= vector_get_strings,
1504 	.get_sset_count	= vector_get_sset_count,
1505 	.get_ethtool_stats = vector_get_ethtool_stats,
1506 	.get_coalesce	= vector_get_coalesce,
1507 	.set_coalesce	= vector_set_coalesce,
1508 	.flash_device	= vector_net_load_bpf_flash,
1509 };
1510 
1511 
1512 static const struct net_device_ops vector_netdev_ops = {
1513 	.ndo_open		= vector_net_open,
1514 	.ndo_stop		= vector_net_close,
1515 	.ndo_start_xmit		= vector_net_start_xmit,
1516 	.ndo_set_rx_mode	= vector_net_set_multicast_list,
1517 	.ndo_tx_timeout		= vector_net_tx_timeout,
1518 	.ndo_set_mac_address	= eth_mac_addr,
1519 	.ndo_validate_addr	= eth_validate_addr,
1520 	.ndo_fix_features	= vector_fix_features,
1521 	.ndo_set_features	= vector_set_features,
1522 #ifdef CONFIG_NET_POLL_CONTROLLER
1523 	.ndo_poll_controller = vector_net_poll_controller,
1524 #endif
1525 };
1526 
1527 static void vector_timer_expire(struct timer_list *t)
1528 {
1529 	struct vector_private *vp = from_timer(vp, t, tl);
1530 
1531 	vp->estats.tx_kicks++;
1532 	napi_schedule(&vp->napi);
1533 }
1534 
1535 
1536 
1537 static void vector_eth_configure(
1538 		int n,
1539 		struct arglist *def
1540 	)
1541 {
1542 	struct vector_device *device;
1543 	struct net_device *dev;
1544 	struct vector_private *vp;
1545 	int err;
1546 
1547 	device = kzalloc(sizeof(*device), GFP_KERNEL);
1548 	if (device == NULL) {
1549 		printk(KERN_ERR "eth_configure failed to allocate struct "
1550 				 "vector_device\n");
1551 		return;
1552 	}
1553 	dev = alloc_etherdev(sizeof(struct vector_private));
1554 	if (dev == NULL) {
1555 		printk(KERN_ERR "eth_configure: failed to allocate struct "
1556 				 "net_device for vec%d\n", n);
1557 		goto out_free_device;
1558 	}
1559 
1560 	dev->mtu = get_mtu(def);
1561 
1562 	INIT_LIST_HEAD(&device->list);
1563 	device->unit = n;
1564 
1565 	/* If this name ends up conflicting with an existing registered
1566 	 * netdevice, that is OK, register_netdev{,ice}() will notice this
1567 	 * and fail.
1568 	 */
1569 	snprintf(dev->name, sizeof(dev->name), "vec%d", n);
1570 	uml_net_setup_etheraddr(dev, uml_vector_fetch_arg(def, "mac"));
1571 	vp = netdev_priv(dev);
1572 
1573 	/* sysfs register */
1574 	if (!driver_registered) {
1575 		platform_driver_register(&uml_net_driver);
1576 		driver_registered = 1;
1577 	}
1578 	device->pdev.id = n;
1579 	device->pdev.name = DRIVER_NAME;
1580 	device->pdev.dev.release = vector_device_release;
1581 	dev_set_drvdata(&device->pdev.dev, device);
1582 	if (platform_device_register(&device->pdev))
1583 		goto out_free_netdev;
1584 	SET_NETDEV_DEV(dev, &device->pdev.dev);
1585 
1586 	device->dev = dev;
1587 
1588 	*vp = ((struct vector_private)
1589 		{
1590 		.list			= LIST_HEAD_INIT(vp->list),
1591 		.dev			= dev,
1592 		.unit			= n,
1593 		.options		= get_transport_options(def),
1594 		.rx_irq			= 0,
1595 		.tx_irq			= 0,
1596 		.parsed			= def,
1597 		.max_packet		= get_mtu(def) + ETH_HEADER_OTHER,
1598 		/* TODO - we need to calculate headroom so that ip header
1599 		 * is 16 byte aligned all the time
1600 		 */
1601 		.headroom		= get_headroom(def),
1602 		.form_header		= NULL,
1603 		.verify_header		= NULL,
1604 		.header_rxbuffer	= NULL,
1605 		.header_txbuffer	= NULL,
1606 		.header_size		= 0,
1607 		.rx_header_size		= 0,
1608 		.rexmit_scheduled	= false,
1609 		.opened			= false,
1610 		.transport_data		= NULL,
1611 		.in_write_poll		= false,
1612 		.coalesce		= 2,
1613 		.req_size		= get_req_size(def),
1614 		.in_error		= false,
1615 		.bpf			= NULL
1616 	});
1617 
1618 	dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
1619 	INIT_WORK(&vp->reset_tx, vector_reset_tx);
1620 
1621 	timer_setup(&vp->tl, vector_timer_expire, 0);
1622 
1623 	/* FIXME */
1624 	dev->netdev_ops = &vector_netdev_ops;
1625 	dev->ethtool_ops = &vector_net_ethtool_ops;
1626 	dev->watchdog_timeo = (HZ >> 1);
1627 	/* primary IRQ - fixme */
1628 	dev->irq = 0; /* we will adjust this once opened */
1629 
1630 	rtnl_lock();
1631 	err = register_netdevice(dev);
1632 	rtnl_unlock();
1633 	if (err)
1634 		goto out_undo_user_init;
1635 
1636 	spin_lock(&vector_devices_lock);
1637 	list_add(&device->list, &vector_devices);
1638 	spin_unlock(&vector_devices_lock);
1639 
1640 	return;
1641 
1642 out_undo_user_init:
1643 	return;
1644 out_free_netdev:
1645 	free_netdev(dev);
1646 out_free_device:
1647 	kfree(device);
1648 }
1649 
1650 
1651 
1652 
1653 /*
1654  * Invoked late in the init
1655  */
1656 
1657 static int __init vector_init(void)
1658 {
1659 	struct list_head *ele;
1660 	struct vector_cmd_line_arg *def;
1661 	struct arglist *parsed;
1662 
1663 	list_for_each(ele, &vec_cmd_line) {
1664 		def = list_entry(ele, struct vector_cmd_line_arg, list);
1665 		parsed = uml_parse_vector_ifspec(def->arguments);
1666 		if (parsed != NULL)
1667 			vector_eth_configure(def->unit, parsed);
1668 	}
1669 	return 0;
1670 }
1671 
1672 
1673 /* Invoked at initial argument parsing, only stores
1674  * arguments until a proper vector_init is called
1675  * later
1676  */
1677 
1678 static int __init vector_setup(char *str)
1679 {
1680 	char *error;
1681 	int n, err;
1682 	struct vector_cmd_line_arg *new;
1683 
1684 	err = vector_parse(str, &n, &str, &error);
1685 	if (err) {
1686 		printk(KERN_ERR "vector_setup - Couldn't parse '%s' : %s\n",
1687 				 str, error);
1688 		return 1;
1689 	}
1690 	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
1691 	if (!new)
1692 		panic("%s: Failed to allocate %zu bytes\n", __func__,
1693 		      sizeof(*new));
1694 	INIT_LIST_HEAD(&new->list);
1695 	new->unit = n;
1696 	new->arguments = str;
1697 	list_add_tail(&new->list, &vec_cmd_line);
1698 	return 1;
1699 }
1700 
1701 __setup("vec", vector_setup);
1702 __uml_help(vector_setup,
1703 "vec[0-9]+:<option>=<value>,<option>=<value>\n"
1704 "	 Configure a vector io network device.\n\n"
1705 );
1706 
1707 late_initcall(vector_init);
1708 
1709 static struct mc_device vector_mc = {
1710 	.list		= LIST_HEAD_INIT(vector_mc.list),
1711 	.name		= "vec",
1712 	.config		= vector_config,
1713 	.get_config	= NULL,
1714 	.id		= vector_id,
1715 	.remove		= vector_remove,
1716 };
1717 
1718 #ifdef CONFIG_INET
1719 static int vector_inetaddr_event(
1720 	struct notifier_block *this,
1721 	unsigned long event,
1722 	void *ptr)
1723 {
1724 	return NOTIFY_DONE;
1725 }
1726 
1727 static struct notifier_block vector_inetaddr_notifier = {
1728 	.notifier_call		= vector_inetaddr_event,
1729 };
1730 
1731 static void inet_register(void)
1732 {
1733 	register_inetaddr_notifier(&vector_inetaddr_notifier);
1734 }
1735 #else
1736 static inline void inet_register(void)
1737 {
1738 }
1739 #endif
1740 
1741 static int vector_net_init(void)
1742 {
1743 	mconsole_register_dev(&vector_mc);
1744 	inet_register();
1745 	return 0;
1746 }
1747 
1748 __initcall(vector_net_init);
1749 
1750 
1751 
1752