xref: /linux/drivers/net/veth.c (revision eb01fe7abbe2d0b38824d2a93fdb4cc3eaf2ccc1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  drivers/net/veth.c
4  *
5  *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
6  *
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
9  *
10  */
11 
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/ethtool.h>
15 #include <linux/etherdevice.h>
16 #include <linux/u64_stats_sync.h>
17 
18 #include <net/rtnetlink.h>
19 #include <net/dst.h>
20 #include <net/xfrm.h>
21 #include <net/xdp.h>
22 #include <linux/veth.h>
23 #include <linux/module.h>
24 #include <linux/bpf.h>
25 #include <linux/filter.h>
26 #include <linux/ptr_ring.h>
27 #include <linux/bpf_trace.h>
28 #include <linux/net_tstamp.h>
29 #include <net/page_pool/helpers.h>
30 
31 #define DRV_NAME	"veth"
32 #define DRV_VERSION	"1.0"
33 
34 #define VETH_XDP_FLAG		BIT(0)
35 #define VETH_RING_SIZE		256
36 #define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)
37 
38 #define VETH_XDP_TX_BULK_SIZE	16
39 #define VETH_XDP_BATCH		16
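
/* Rough meaning of the constants above: VETH_RING_SIZE bounds the per-queue
 * ptr_ring used to hand packets to the peer's NAPI context (and also sizes
 * the page_pool), VETH_XDP_HEADROOM is the headroom budgeted per buffer when
 * computing the XDP MTU limit, VETH_XDP_TX_BULK_SIZE caps how many XDP_TX
 * frames are queued before a bulk flush, and VETH_XDP_BATCH is the number of
 * XDP_PASS frames converted to skbs per batch.
 */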
40 
41 struct veth_stats {
42 	u64	rx_drops;
43 	/* xdp */
44 	u64	xdp_packets;
45 	u64	xdp_bytes;
46 	u64	xdp_redirect;
47 	u64	xdp_drops;
48 	u64	xdp_tx;
49 	u64	xdp_tx_err;
50 	u64	peer_tq_xdp_xmit;
51 	u64	peer_tq_xdp_xmit_err;
52 };
53 
54 struct veth_rq_stats {
55 	struct veth_stats	vs;
56 	struct u64_stats_sync	syncp;
57 };
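
/* In veth_rq_stats, the syncp seqcount lets readers take a consistent
 * snapshot of the 64-bit counters even on 32-bit machines: writers wrap
 * updates in u64_stats_update_begin()/_end(), while readers such as
 * veth_stats_rx() retry with u64_stats_fetch_begin()/_retry() until they
 * observe an unchanged sequence.
 */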
58 
59 struct veth_rq {
60 	struct napi_struct	xdp_napi;
61 	struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
62 	struct net_device	*dev;
63 	struct bpf_prog __rcu	*xdp_prog;
64 	struct xdp_mem_info	xdp_mem;
65 	struct veth_rq_stats	stats;
66 	bool			rx_notify_masked;
67 	struct ptr_ring		xdp_ring;
68 	struct xdp_rxq_info	xdp_rxq;
69 	struct page_pool	*page_pool;
70 };
71 
72 struct veth_priv {
73 	struct net_device __rcu	*peer;
74 	atomic64_t		dropped;
75 	struct bpf_prog		*_xdp_prog;
76 	struct veth_rq		*rq;
77 	unsigned int		requested_headroom;
78 };
79 
80 struct veth_xdp_tx_bq {
81 	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
82 	unsigned int count;
83 };
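
/* veth_xdp_tx_bq is the per-poll bulk queue for XDP_TX: frames accumulate
 * here (see veth_xdp_tx()) and are pushed to the peer in one go by
 * veth_xdp_flush_bq(), either when the array fills up or at the end of the
 * NAPI poll, amortizing the ptr_ring producer lock.
 */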
84 
85 /*
86  * ethtool interface
87  */
88 
89 struct veth_q_stat_desc {
90 	char	desc[ETH_GSTRING_LEN];
91 	size_t	offset;
92 };
93 
94 #define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)
95 
96 static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
97 	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
98 	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
99 	{ "drops",		VETH_RQ_STAT(rx_drops) },
100 	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
101 	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
102 	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
103 	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
104 };
105 
106 #define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)
107 
108 static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
109 	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
110 	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
111 };
112 
113 #define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)
114 
115 static struct {
116 	const char string[ETH_GSTRING_LEN];
117 } ethtool_stats_keys[] = {
118 	{ "peer_ifindex" },
119 };
120 
121 struct veth_xdp_buff {
122 	struct xdp_buff xdp;
123 	struct sk_buff *skb;
124 };
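
/* veth_xdp_buff keeps the originating skb (when there is one) next to the
 * xdp_buff so the xmo_rx_* metadata callbacks below can recover hardware
 * timestamp, hash and VLAN tag from it; for buffers built from native
 * xdp_frames, skb stays NULL and those callbacks return -ENODATA.
 */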
125 
126 static int veth_get_link_ksettings(struct net_device *dev,
127 				   struct ethtool_link_ksettings *cmd)
128 {
129 	cmd->base.speed		= SPEED_10000;
130 	cmd->base.duplex	= DUPLEX_FULL;
131 	cmd->base.port		= PORT_TP;
132 	cmd->base.autoneg	= AUTONEG_DISABLE;
133 	return 0;
134 }
135 
136 static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
137 {
138 	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
139 	strscpy(info->version, DRV_VERSION, sizeof(info->version));
140 }
141 
142 static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
143 {
144 	u8 *p = buf;
145 	int i, j;
146 
147 	switch (stringset) {
148 	case ETH_SS_STATS:
149 		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
150 		p += sizeof(ethtool_stats_keys);
151 		for (i = 0; i < dev->real_num_rx_queues; i++)
152 			for (j = 0; j < VETH_RQ_STATS_LEN; j++)
153 				ethtool_sprintf(&p, "rx_queue_%u_%.18s",
154 						i, veth_rq_stats_desc[j].desc);
155 
156 		for (i = 0; i < dev->real_num_tx_queues; i++)
157 			for (j = 0; j < VETH_TQ_STATS_LEN; j++)
158 				ethtool_sprintf(&p, "tx_queue_%u_%.18s",
159 						i, veth_tq_stats_desc[j].desc);
160 
161 		page_pool_ethtool_stats_get_strings(p);
162 		break;
163 	}
164 }
165 
166 static int veth_get_sset_count(struct net_device *dev, int sset)
167 {
168 	switch (sset) {
169 	case ETH_SS_STATS:
170 		return ARRAY_SIZE(ethtool_stats_keys) +
171 		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
172 		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues +
173 		       page_pool_ethtool_stats_get_count();
174 	default:
175 		return -EOPNOTSUPP;
176 	}
177 }
178 
179 static void veth_get_page_pool_stats(struct net_device *dev, u64 *data)
180 {
181 #ifdef CONFIG_PAGE_POOL_STATS
182 	struct veth_priv *priv = netdev_priv(dev);
183 	struct page_pool_stats pp_stats = {};
184 	int i;
185 
186 	for (i = 0; i < dev->real_num_rx_queues; i++) {
187 		if (!priv->rq[i].page_pool)
188 			continue;
189 		page_pool_get_stats(priv->rq[i].page_pool, &pp_stats);
190 	}
191 	page_pool_ethtool_stats_get(data, &pp_stats);
192 #endif /* CONFIG_PAGE_POOL_STATS */
193 }
194 
195 static void veth_get_ethtool_stats(struct net_device *dev,
196 		struct ethtool_stats *stats, u64 *data)
197 {
198 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
199 	struct net_device *peer = rtnl_dereference(priv->peer);
200 	int i, j, idx, pp_idx;
201 
202 	data[0] = peer ? peer->ifindex : 0;
203 	idx = 1;
204 	for (i = 0; i < dev->real_num_rx_queues; i++) {
205 		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
206 		const void *stats_base = (void *)&rq_stats->vs;
207 		unsigned int start;
208 		size_t offset;
209 
210 		do {
211 			start = u64_stats_fetch_begin(&rq_stats->syncp);
212 			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
213 				offset = veth_rq_stats_desc[j].offset;
214 				data[idx + j] = *(u64 *)(stats_base + offset);
215 			}
216 		} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
217 		idx += VETH_RQ_STATS_LEN;
218 	}
219 	pp_idx = idx;
220 
221 	if (!peer)
222 		goto page_pool_stats;
223 
224 	rcv_priv = netdev_priv(peer);
225 	for (i = 0; i < peer->real_num_rx_queues; i++) {
226 		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
227 		const void *base = (void *)&rq_stats->vs;
228 		unsigned int start, tx_idx = idx;
229 		size_t offset;
230 
231 		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
232 		do {
233 			start = u64_stats_fetch_begin(&rq_stats->syncp);
234 			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
235 				offset = veth_tq_stats_desc[j].offset;
236 				data[tx_idx + j] += *(u64 *)(base + offset);
237 			}
238 		} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
239 	}
240 	pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;
241 
242 page_pool_stats:
243 	veth_get_page_pool_stats(dev, &data[pp_idx]);
244 }
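
/* veth_get_ethtool_stats() fills the "tx_queue_*_xdp_xmit*" slots from the
 * peer's per-rq counters: frames this device pushes via ndo_xdp_xmit are
 * accounted on the receiving peer rq, so they are read back from the peer
 * here and folded onto this device's tx queues modulo real_num_tx_queues.
 */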
245 
246 static void veth_get_channels(struct net_device *dev,
247 			      struct ethtool_channels *channels)
248 {
249 	channels->tx_count = dev->real_num_tx_queues;
250 	channels->rx_count = dev->real_num_rx_queues;
251 	channels->max_tx = dev->num_tx_queues;
252 	channels->max_rx = dev->num_rx_queues;
253 }
254 
255 static int veth_set_channels(struct net_device *dev,
256 			     struct ethtool_channels *ch);
257 
258 static const struct ethtool_ops veth_ethtool_ops = {
259 	.get_drvinfo		= veth_get_drvinfo,
260 	.get_link		= ethtool_op_get_link,
261 	.get_strings		= veth_get_strings,
262 	.get_sset_count		= veth_get_sset_count,
263 	.get_ethtool_stats	= veth_get_ethtool_stats,
264 	.get_link_ksettings	= veth_get_link_ksettings,
265 	.get_ts_info		= ethtool_op_get_ts_info,
266 	.get_channels		= veth_get_channels,
267 	.set_channels		= veth_set_channels,
268 };
269 
270 /* general routines */
271 
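/* A single ptr_ring per rq carries both sk_buff and xdp_frame pointers.
 * The helpers below tell them apart by tagging xdp_frame pointers with
 * bit 0 (VETH_XDP_FLAG); pointer alignment guarantees that bit is
 * otherwise clear.
 */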
272 static bool veth_is_xdp_frame(void *ptr)
273 {
274 	return (unsigned long)ptr & VETH_XDP_FLAG;
275 }
276 
277 static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
278 {
279 	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
280 }
281 
282 static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
283 {
284 	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
285 }
286 
287 static void veth_ptr_free(void *ptr)
288 {
289 	if (veth_is_xdp_frame(ptr))
290 		xdp_return_frame(veth_ptr_to_xdp(ptr));
291 	else
292 		kfree_skb(ptr);
293 }
294 
295 static void __veth_xdp_flush(struct veth_rq *rq)
296 {
297 	/* Write ptr_ring before reading rx_notify_masked */
298 	smp_mb();
299 	if (!READ_ONCE(rq->rx_notify_masked) &&
300 	    napi_schedule_prep(&rq->xdp_napi)) {
301 		WRITE_ONCE(rq->rx_notify_masked, true);
302 		__napi_schedule(&rq->xdp_napi);
303 	}
304 }
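
/* In __veth_xdp_flush(), the smp_mb() orders the caller's ptr_ring
 * insertion before the rx_notify_masked read; it pairs with the
 * smp_store_mb() in veth_poll(), so either the producer sees the mask
 * cleared and schedules NAPI, or the poller's final ring re-check sees the
 * new entry. rx_notify_masked thus suppresses redundant __napi_schedule()
 * calls while a poll is already pending.
 */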
305 
306 static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
307 {
308 	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
309 		dev_kfree_skb_any(skb);
310 		return NET_RX_DROP;
311 	}
312 
313 	return NET_RX_SUCCESS;
314 }
315 
316 static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
317 			    struct veth_rq *rq, bool xdp)
318 {
319 	return __dev_forward_skb(dev, skb) ?: xdp ?
320 		veth_xdp_rx(rq, skb) :
321 		__netif_rx(skb);
322 }
323 
324 /* return true if the specified skb has chances of GRO aggregation
325  * Don't strive for accuracy, but try to avoid GRO overhead in the most
326  * common scenarios.
327  * When XDP is enabled, all traffic is considered eligible, as the xmit
328  * device has TSO off.
329  * When TSO is enabled on the xmit device, we are likely interested only
330  * in UDP aggregation; explicitly check for that if the skb is suspected
331  * to belong to locally generated UDP traffic (the sock_wfree destructor
332  * is used by UDP, ICMP and XDP sockets).
333  */
334 static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
335 					 const struct net_device *rcv,
336 					 const struct sk_buff *skb)
337 {
338 	return !(dev->features & NETIF_F_ALL_TSO) ||
339 		(skb->destructor == sock_wfree &&
340 		 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
341 }
342 
343 static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
344 {
345 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
346 	struct veth_rq *rq = NULL;
347 	int ret = NETDEV_TX_OK;
348 	struct net_device *rcv;
349 	int length = skb->len;
350 	bool use_napi = false;
351 	int rxq;
352 
353 	rcu_read_lock();
354 	rcv = rcu_dereference(priv->peer);
355 	if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
356 		kfree_skb(skb);
357 		goto drop;
358 	}
359 
360 	rcv_priv = netdev_priv(rcv);
361 	rxq = skb_get_queue_mapping(skb);
362 	if (rxq < rcv->real_num_rx_queues) {
363 		rq = &rcv_priv->rq[rxq];
364 
365 		/* The napi pointer is available when an XDP program is
366 		 * attached or when GRO is enabled.
367 		 * Don't bother with napi/GRO if the skb can't be aggregated.
368 		 */
369 		use_napi = rcu_access_pointer(rq->napi) &&
370 			   veth_skb_is_eligible_for_gro(dev, rcv, skb);
371 	}
372 
373 	skb_tx_timestamp(skb);
374 	if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
375 		if (!use_napi)
376 			dev_sw_netstats_tx_add(dev, 1, length);
377 		else
378 			__veth_xdp_flush(rq);
379 	} else {
380 drop:
381 		atomic64_inc(&priv->dropped);
382 		ret = NET_XMIT_DROP;
383 	}
384 
385 	rcu_read_unlock();
386 
387 	return ret;
388 }
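
/* veth_xmit() in short: the peer is looked up under RCU, the skb's queue
 * mapping selects a peer rq, and if that rq has NAPI active (XDP attached
 * or GRO enabled) and the skb looks worth aggregating, the skb is queued
 * on the peer's xdp_ring and the peer's NAPI is kicked; otherwise it goes
 * straight to __netif_rx() on the peer. In the NAPI case the byte/packet
 * accounting happens on the peer's rq stats rather than here.
 */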
389 
390 static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
391 {
392 	struct veth_priv *priv = netdev_priv(dev);
393 	int i;
394 
395 	result->peer_tq_xdp_xmit_err = 0;
396 	result->xdp_packets = 0;
397 	result->xdp_tx_err = 0;
398 	result->xdp_bytes = 0;
399 	result->rx_drops = 0;
400 	for (i = 0; i < dev->num_rx_queues; i++) {
401 		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
402 		struct veth_rq_stats *stats = &priv->rq[i].stats;
403 		unsigned int start;
404 
405 		do {
406 			start = u64_stats_fetch_begin(&stats->syncp);
407 			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
408 			xdp_tx_err = stats->vs.xdp_tx_err;
409 			packets = stats->vs.xdp_packets;
410 			bytes = stats->vs.xdp_bytes;
411 			drops = stats->vs.rx_drops;
412 		} while (u64_stats_fetch_retry(&stats->syncp, start));
413 		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
414 		result->xdp_tx_err += xdp_tx_err;
415 		result->xdp_packets += packets;
416 		result->xdp_bytes += bytes;
417 		result->rx_drops += drops;
418 	}
419 }
420 
421 static void veth_get_stats64(struct net_device *dev,
422 			     struct rtnl_link_stats64 *tot)
423 {
424 	struct veth_priv *priv = netdev_priv(dev);
425 	struct net_device *peer;
426 	struct veth_stats rx;
427 
428 	tot->tx_dropped = atomic64_read(&priv->dropped);
429 	dev_fetch_sw_netstats(tot, dev->tstats);
430 
431 	veth_stats_rx(&rx, dev);
432 	tot->tx_dropped += rx.xdp_tx_err;
433 	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
434 	tot->rx_bytes += rx.xdp_bytes;
435 	tot->rx_packets += rx.xdp_packets;
436 
437 	rcu_read_lock();
438 	peer = rcu_dereference(priv->peer);
439 	if (peer) {
440 		struct rtnl_link_stats64 tot_peer = {};
441 
442 		dev_fetch_sw_netstats(&tot_peer, peer->tstats);
443 		tot->rx_bytes += tot_peer.tx_bytes;
444 		tot->rx_packets += tot_peer.tx_packets;
445 
446 		veth_stats_rx(&rx, peer);
447 		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
448 		tot->rx_dropped += rx.xdp_tx_err;
449 		tot->tx_bytes += rx.xdp_bytes;
450 		tot->tx_packets += rx.xdp_packets;
451 	}
452 	rcu_read_unlock();
453 }
454 
455 /* fake multicast ability */
456 static void veth_set_multicast_list(struct net_device *dev)
457 {
458 }
459 
460 static int veth_select_rxq(struct net_device *dev)
461 {
462 	return smp_processor_id() % dev->real_num_rx_queues;
463 }
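
/* veth_select_rxq() serves the XDP transmit paths, which carry no skb
 * queue mapping: the target rq on the peer is picked by the current CPU
 * modulo the peer's rx queue count. Several CPUs may map to the same rq,
 * which is why veth_xdp_xmit() takes the ring's producer lock.
 */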
464 
465 static struct net_device *veth_peer_dev(struct net_device *dev)
466 {
467 	struct veth_priv *priv = netdev_priv(dev);
468 
469 	/* Callers must be under RCU read side. */
470 	return rcu_dereference(priv->peer);
471 }
472 
473 static int veth_xdp_xmit(struct net_device *dev, int n,
474 			 struct xdp_frame **frames,
475 			 u32 flags, bool ndo_xmit)
476 {
477 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
478 	int i, ret = -ENXIO, nxmit = 0;
479 	struct net_device *rcv;
480 	unsigned int max_len;
481 	struct veth_rq *rq;
482 
483 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
484 		return -EINVAL;
485 
486 	rcu_read_lock();
487 	rcv = rcu_dereference(priv->peer);
488 	if (unlikely(!rcv))
489 		goto out;
490 
491 	rcv_priv = netdev_priv(rcv);
492 	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
493 	/* The napi pointer is set if NAPI is enabled, which ensures that
494 	 * xdp_ring is initialized on receive side and the peer device is up.
495 	 */
496 	if (!rcu_access_pointer(rq->napi))
497 		goto out;
498 
499 	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
500 
501 	spin_lock(&rq->xdp_ring.producer_lock);
502 	for (i = 0; i < n; i++) {
503 		struct xdp_frame *frame = frames[i];
504 		void *ptr = veth_xdp_to_ptr(frame);
505 
506 		if (unlikely(xdp_get_frame_len(frame) > max_len ||
507 			     __ptr_ring_produce(&rq->xdp_ring, ptr)))
508 			break;
509 		nxmit++;
510 	}
511 	spin_unlock(&rq->xdp_ring.producer_lock);
512 
513 	if (flags & XDP_XMIT_FLUSH)
514 		__veth_xdp_flush(rq);
515 
516 	ret = nxmit;
517 	if (ndo_xmit) {
518 		u64_stats_update_begin(&rq->stats.syncp);
519 		rq->stats.vs.peer_tq_xdp_xmit += nxmit;
520 		rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
521 		u64_stats_update_end(&rq->stats.syncp);
522 	}
523 
524 out:
525 	rcu_read_unlock();
526 
527 	return ret;
528 }
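
/* veth_xdp_xmit() serves two callers: the ndo_xdp_xmit entry point
 * (ndo_xmit == true, xmit/err counters accounted on the chosen peer rq)
 * and the internal XDP_TX bulk flush (ndo_xmit == false, accounted in
 * veth_xdp_flush_bq()). It returns how many frames were queued; frames
 * that did not fit remain the caller's responsibility to free.
 */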
529 
530 static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
531 			     struct xdp_frame **frames, u32 flags)
532 {
533 	int err;
534 
535 	err = veth_xdp_xmit(dev, n, frames, flags, true);
536 	if (err < 0) {
537 		struct veth_priv *priv = netdev_priv(dev);
538 
539 		atomic64_add(n, &priv->dropped);
540 	}
541 
542 	return err;
543 }
544 
545 static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
546 {
547 	int sent, i, err = 0, drops;
548 
549 	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
550 	if (sent < 0) {
551 		err = sent;
552 		sent = 0;
553 	}
554 
555 	for (i = sent; unlikely(i < bq->count); i++)
556 		xdp_return_frame(bq->q[i]);
557 
558 	drops = bq->count - sent;
559 	trace_xdp_bulk_tx(rq->dev, sent, drops, err);
560 
561 	u64_stats_update_begin(&rq->stats.syncp);
562 	rq->stats.vs.xdp_tx += sent;
563 	rq->stats.vs.xdp_tx_err += drops;
564 	u64_stats_update_end(&rq->stats.syncp);
565 
566 	bq->count = 0;
567 }
568 
569 static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
570 {
571 	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
572 	struct net_device *rcv;
573 	struct veth_rq *rcv_rq;
574 
575 	rcu_read_lock();
576 	veth_xdp_flush_bq(rq, bq);
577 	rcv = rcu_dereference(priv->peer);
578 	if (unlikely(!rcv))
579 		goto out;
580 
581 	rcv_priv = netdev_priv(rcv);
582 	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
583 	/* xdp_ring is initialized on receive side? */
584 	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
585 		goto out;
586 
587 	__veth_xdp_flush(rcv_rq);
588 out:
589 	rcu_read_unlock();
590 }
591 
592 static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
593 		       struct veth_xdp_tx_bq *bq)
594 {
595 	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
596 
597 	if (unlikely(!frame))
598 		return -EOVERFLOW;
599 
600 	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
601 		veth_xdp_flush_bq(rq, bq);
602 
603 	bq->q[bq->count++] = frame;
604 
605 	return 0;
606 }
607 
608 static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
609 					  struct xdp_frame *frame,
610 					  struct veth_xdp_tx_bq *bq,
611 					  struct veth_stats *stats)
612 {
613 	struct xdp_frame orig_frame;
614 	struct bpf_prog *xdp_prog;
615 
616 	rcu_read_lock();
617 	xdp_prog = rcu_dereference(rq->xdp_prog);
618 	if (likely(xdp_prog)) {
619 		struct veth_xdp_buff vxbuf;
620 		struct xdp_buff *xdp = &vxbuf.xdp;
621 		u32 act;
622 
623 		xdp_convert_frame_to_buff(frame, xdp);
624 		xdp->rxq = &rq->xdp_rxq;
625 		vxbuf.skb = NULL;
626 
627 		act = bpf_prog_run_xdp(xdp_prog, xdp);
628 
629 		switch (act) {
630 		case XDP_PASS:
631 			if (xdp_update_frame_from_buff(xdp, frame))
632 				goto err_xdp;
633 			break;
634 		case XDP_TX:
635 			orig_frame = *frame;
636 			xdp->rxq->mem = frame->mem;
637 			if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
638 				trace_xdp_exception(rq->dev, xdp_prog, act);
639 				frame = &orig_frame;
640 				stats->rx_drops++;
641 				goto err_xdp;
642 			}
643 			stats->xdp_tx++;
644 			rcu_read_unlock();
645 			goto xdp_xmit;
646 		case XDP_REDIRECT:
647 			orig_frame = *frame;
648 			xdp->rxq->mem = frame->mem;
649 			if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
650 				frame = &orig_frame;
651 				stats->rx_drops++;
652 				goto err_xdp;
653 			}
654 			stats->xdp_redirect++;
655 			rcu_read_unlock();
656 			goto xdp_xmit;
657 		default:
658 			bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
659 			fallthrough;
660 		case XDP_ABORTED:
661 			trace_xdp_exception(rq->dev, xdp_prog, act);
662 			fallthrough;
663 		case XDP_DROP:
664 			stats->xdp_drops++;
665 			goto err_xdp;
666 		}
667 	}
668 	rcu_read_unlock();
669 
670 	return frame;
671 err_xdp:
672 	rcu_read_unlock();
673 	xdp_return_frame(frame);
674 xdp_xmit:
675 	return NULL;
676 }
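
/* veth_xdp_rcv_one() handles frames that arrived via the peer's
 * ndo_xdp_xmit. On XDP_PASS the (possibly adjusted) frame is returned so
 * the caller can batch-convert it to an skb; XDP_TX and XDP_REDIRECT
 * consume the frame, and error/drop paths hand it back to its memory
 * model via xdp_return_frame().
 */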
677 
678 /* frames array contains VETH_XDP_BATCH at most */
679 static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
680 				  int n_xdpf, struct veth_xdp_tx_bq *bq,
681 				  struct veth_stats *stats)
682 {
683 	void *skbs[VETH_XDP_BATCH];
684 	int i;
685 
686 	if (xdp_alloc_skb_bulk(skbs, n_xdpf,
687 			       GFP_ATOMIC | __GFP_ZERO) < 0) {
688 		for (i = 0; i < n_xdpf; i++)
689 			xdp_return_frame(frames[i]);
690 		stats->rx_drops += n_xdpf;
691 
692 		return;
693 	}
694 
695 	for (i = 0; i < n_xdpf; i++) {
696 		struct sk_buff *skb = skbs[i];
697 
698 		skb = __xdp_build_skb_from_frame(frames[i], skb,
699 						 rq->dev);
700 		if (!skb) {
701 			xdp_return_frame(frames[i]);
702 			stats->rx_drops++;
703 			continue;
704 		}
705 		napi_gro_receive(&rq->xdp_napi, skb);
706 	}
707 }
708 
709 static void veth_xdp_get(struct xdp_buff *xdp)
710 {
711 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
712 	int i;
713 
714 	get_page(virt_to_page(xdp->data));
715 	if (likely(!xdp_buff_has_frags(xdp)))
716 		return;
717 
718 	for (i = 0; i < sinfo->nr_frags; i++)
719 		__skb_frag_ref(&sinfo->frags[i]);
720 }
721 
722 static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
723 					struct xdp_buff *xdp,
724 					struct sk_buff **pskb)
725 {
726 	struct sk_buff *skb = *pskb;
727 	u32 frame_sz;
728 
729 	if (skb_shared(skb) || skb_head_is_locked(skb) ||
730 	    skb_shinfo(skb)->nr_frags ||
731 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
732 		if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM))
733 			goto drop;
734 
735 		skb = *pskb;
736 	}
737 
738 	/* SKB "head" area always has tailroom for skb_shared_info */
739 	frame_sz = skb_end_pointer(skb) - skb->head;
740 	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
741 	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
742 	xdp_prepare_buff(xdp, skb->head, skb_headroom(skb),
743 			 skb_headlen(skb), true);
744 
745 	if (skb_is_nonlinear(skb)) {
746 		skb_shinfo(skb)->xdp_frags_size = skb->data_len;
747 		xdp_buff_set_frags_flag(xdp);
748 	} else {
749 		xdp_buff_clear_frags_flag(xdp);
750 	}
751 	*pskb = skb;
752 
753 	return 0;
754 drop:
755 	consume_skb(skb);
756 	*pskb = NULL;
757 
758 	return -ENOMEM;
759 }
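
/* veth_convert_skb_to_xdp_buff() makes an skb safe to expose to an XDP
 * program: if the skb is shared, its head is not exclusively writable, it
 * already carries frags, or its headroom is below XDP_PACKET_HEADROOM,
 * the data is copied into pages from the per-queue page_pool via
 * skb_pp_cow_data() before the xdp_buff is initialized over it.
 */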
760 
761 static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
762 					struct sk_buff *skb,
763 					struct veth_xdp_tx_bq *bq,
764 					struct veth_stats *stats)
765 {
766 	void *orig_data, *orig_data_end;
767 	struct bpf_prog *xdp_prog;
768 	struct veth_xdp_buff vxbuf;
769 	struct xdp_buff *xdp = &vxbuf.xdp;
770 	u32 act, metalen;
771 	int off;
772 
773 	skb_prepare_for_gro(skb);
774 
775 	rcu_read_lock();
776 	xdp_prog = rcu_dereference(rq->xdp_prog);
777 	if (unlikely(!xdp_prog)) {
778 		rcu_read_unlock();
779 		goto out;
780 	}
781 
782 	__skb_push(skb, skb->data - skb_mac_header(skb));
783 	if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
784 		goto drop;
785 	vxbuf.skb = skb;
786 
787 	orig_data = xdp->data;
788 	orig_data_end = xdp->data_end;
789 
790 	act = bpf_prog_run_xdp(xdp_prog, xdp);
791 
792 	switch (act) {
793 	case XDP_PASS:
794 		break;
795 	case XDP_TX:
796 		veth_xdp_get(xdp);
797 		consume_skb(skb);
798 		xdp->rxq->mem = rq->xdp_mem;
799 		if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
800 			trace_xdp_exception(rq->dev, xdp_prog, act);
801 			stats->rx_drops++;
802 			goto err_xdp;
803 		}
804 		stats->xdp_tx++;
805 		rcu_read_unlock();
806 		goto xdp_xmit;
807 	case XDP_REDIRECT:
808 		veth_xdp_get(xdp);
809 		consume_skb(skb);
810 		xdp->rxq->mem = rq->xdp_mem;
811 		if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
812 			stats->rx_drops++;
813 			goto err_xdp;
814 		}
815 		stats->xdp_redirect++;
816 		rcu_read_unlock();
817 		goto xdp_xmit;
818 	default:
819 		bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
820 		fallthrough;
821 	case XDP_ABORTED:
822 		trace_xdp_exception(rq->dev, xdp_prog, act);
823 		fallthrough;
824 	case XDP_DROP:
825 		stats->xdp_drops++;
826 		goto xdp_drop;
827 	}
828 	rcu_read_unlock();
829 
830 	/* check if bpf_xdp_adjust_head was used */
831 	off = orig_data - xdp->data;
832 	if (off > 0)
833 		__skb_push(skb, off);
834 	else if (off < 0)
835 		__skb_pull(skb, -off);
836 
837 	skb_reset_mac_header(skb);
838 
839 	/* check if bpf_xdp_adjust_tail was used */
840 	off = xdp->data_end - orig_data_end;
841 	if (off != 0)
842 		__skb_put(skb, off); /* positive on grow, negative on shrink */
843 
844 	/* XDP frag metadata (e.g. nr_frags) is updated in eBPF helpers
845 	 * (e.g. bpf_xdp_adjust_tail), so we need to update data_len here.
846 	 */
847 	if (xdp_buff_has_frags(xdp))
848 		skb->data_len = skb_shinfo(skb)->xdp_frags_size;
849 	else
850 		skb->data_len = 0;
851 
852 	skb->protocol = eth_type_trans(skb, rq->dev);
853 
854 	metalen = xdp->data - xdp->data_meta;
855 	if (metalen)
856 		skb_metadata_set(skb, metalen);
857 out:
858 	return skb;
859 drop:
860 	stats->rx_drops++;
861 xdp_drop:
862 	rcu_read_unlock();
863 	kfree_skb(skb);
864 	return NULL;
865 err_xdp:
866 	rcu_read_unlock();
867 	xdp_return_buff(xdp);
868 xdp_xmit:
869 	return NULL;
870 }
871 
872 static int veth_xdp_rcv(struct veth_rq *rq, int budget,
873 			struct veth_xdp_tx_bq *bq,
874 			struct veth_stats *stats)
875 {
876 	int i, done = 0, n_xdpf = 0;
877 	void *xdpf[VETH_XDP_BATCH];
878 
879 	for (i = 0; i < budget; i++) {
880 		void *ptr = __ptr_ring_consume(&rq->xdp_ring);
881 
882 		if (!ptr)
883 			break;
884 
885 		if (veth_is_xdp_frame(ptr)) {
886 			/* ndo_xdp_xmit */
887 			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
888 
889 			stats->xdp_bytes += xdp_get_frame_len(frame);
890 			frame = veth_xdp_rcv_one(rq, frame, bq, stats);
891 			if (frame) {
892 				/* XDP_PASS */
893 				xdpf[n_xdpf++] = frame;
894 				if (n_xdpf == VETH_XDP_BATCH) {
895 					veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
896 							      bq, stats);
897 					n_xdpf = 0;
898 				}
899 			}
900 		} else {
901 			/* ndo_start_xmit */
902 			struct sk_buff *skb = ptr;
903 
904 			stats->xdp_bytes += skb->len;
905 			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
906 			if (skb) {
907 				if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
908 					netif_receive_skb(skb);
909 				else
910 					napi_gro_receive(&rq->xdp_napi, skb);
911 			}
912 		}
913 		done++;
914 	}
915 
916 	if (n_xdpf)
917 		veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);
918 
919 	u64_stats_update_begin(&rq->stats.syncp);
920 	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
921 	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
922 	rq->stats.vs.xdp_drops += stats->xdp_drops;
923 	rq->stats.vs.rx_drops += stats->rx_drops;
924 	rq->stats.vs.xdp_packets += done;
925 	u64_stats_update_end(&rq->stats.syncp);
926 
927 	return done;
928 }
929 
930 static int veth_poll(struct napi_struct *napi, int budget)
931 {
932 	struct veth_rq *rq =
933 		container_of(napi, struct veth_rq, xdp_napi);
934 	struct veth_stats stats = {};
935 	struct veth_xdp_tx_bq bq;
936 	int done;
937 
938 	bq.count = 0;
939 
940 	xdp_set_return_frame_no_direct();
941 	done = veth_xdp_rcv(rq, budget, &bq, &stats);
942 
943 	if (stats.xdp_redirect > 0)
944 		xdp_do_flush();
945 
946 	if (done < budget && napi_complete_done(napi, done)) {
947 		/* Write rx_notify_masked before reading ptr_ring */
948 		smp_store_mb(rq->rx_notify_masked, false);
949 		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
950 			if (napi_schedule_prep(&rq->xdp_napi)) {
951 				WRITE_ONCE(rq->rx_notify_masked, true);
952 				__napi_schedule(&rq->xdp_napi);
953 			}
954 		}
955 	}
956 
957 	if (stats.xdp_tx > 0)
958 		veth_xdp_flush(rq, &bq);
959 	xdp_clear_return_frame_no_direct();
960 
961 	return done;
962 }
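
/* In veth_poll(), note the re-check after napi_complete_done():
 * rx_notify_masked is cleared with smp_store_mb() and the ring is
 * inspected again, so a producer that enqueued right after the last
 * consume but still saw the mask set cannot leave the queue stranded.
 * This is the consumer half of the barrier pairing described at
 * __veth_xdp_flush().
 */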
963 
964 static int veth_create_page_pool(struct veth_rq *rq)
965 {
966 	struct page_pool_params pp_params = {
967 		.order = 0,
968 		.pool_size = VETH_RING_SIZE,
969 		.nid = NUMA_NO_NODE,
970 		.dev = &rq->dev->dev,
971 	};
972 
973 	rq->page_pool = page_pool_create(&pp_params);
974 	if (IS_ERR(rq->page_pool)) {
975 		int err = PTR_ERR(rq->page_pool);
976 
977 		rq->page_pool = NULL;
978 		return err;
979 	}
980 
981 	return 0;
982 }
983 
984 static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
985 {
986 	struct veth_priv *priv = netdev_priv(dev);
987 	int err, i;
988 
989 	for (i = start; i < end; i++) {
990 		err = veth_create_page_pool(&priv->rq[i]);
991 		if (err)
992 			goto err_page_pool;
993 	}
994 
995 	for (i = start; i < end; i++) {
996 		struct veth_rq *rq = &priv->rq[i];
997 
998 		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
999 		if (err)
1000 			goto err_xdp_ring;
1001 	}
1002 
1003 	for (i = start; i < end; i++) {
1004 		struct veth_rq *rq = &priv->rq[i];
1005 
1006 		napi_enable(&rq->xdp_napi);
1007 		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
1008 	}
1009 
1010 	return 0;
1011 
1012 err_xdp_ring:
1013 	for (i--; i >= start; i--)
1014 		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
1015 	i = end;
1016 err_page_pool:
1017 	for (i--; i >= start; i--) {
1018 		page_pool_destroy(priv->rq[i].page_pool);
1019 		priv->rq[i].page_pool = NULL;
1020 	}
1021 
1022 	return err;
1023 }
1024 
1025 static int __veth_napi_enable(struct net_device *dev)
1026 {
1027 	return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1028 }
1029 
1030 static void veth_napi_del_range(struct net_device *dev, int start, int end)
1031 {
1032 	struct veth_priv *priv = netdev_priv(dev);
1033 	int i;
1034 
1035 	for (i = start; i < end; i++) {
1036 		struct veth_rq *rq = &priv->rq[i];
1037 
1038 		rcu_assign_pointer(priv->rq[i].napi, NULL);
1039 		napi_disable(&rq->xdp_napi);
1040 		__netif_napi_del(&rq->xdp_napi);
1041 	}
1042 	synchronize_net();
1043 
1044 	for (i = start; i < end; i++) {
1045 		struct veth_rq *rq = &priv->rq[i];
1046 
1047 		rq->rx_notify_masked = false;
1048 		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
1049 	}
1050 
1051 	for (i = start; i < end; i++) {
1052 		page_pool_destroy(priv->rq[i].page_pool);
1053 		priv->rq[i].page_pool = NULL;
1054 	}
1055 }
1056 
1057 static void veth_napi_del(struct net_device *dev)
1058 {
1059 	veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
1060 }
1061 
1062 static bool veth_gro_requested(const struct net_device *dev)
1063 {
1064 	return !!(dev->wanted_features & NETIF_F_GRO);
1065 }
1066 
1067 static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
1068 				 bool napi_already_on)
1069 {
1070 	struct veth_priv *priv = netdev_priv(dev);
1071 	int err, i;
1072 
1073 	for (i = start; i < end; i++) {
1074 		struct veth_rq *rq = &priv->rq[i];
1075 
1076 		if (!napi_already_on)
1077 			netif_napi_add(dev, &rq->xdp_napi, veth_poll);
1078 		err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
1079 		if (err < 0)
1080 			goto err_rxq_reg;
1081 
1082 		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
1083 						 MEM_TYPE_PAGE_SHARED,
1084 						 NULL);
1085 		if (err < 0)
1086 			goto err_reg_mem;
1087 
1088 		/* Save original mem info as it can be overwritten */
1089 		rq->xdp_mem = rq->xdp_rxq.mem;
1090 	}
1091 	return 0;
1092 
1093 err_reg_mem:
1094 	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
1095 err_rxq_reg:
1096 	for (i--; i >= start; i--) {
1097 		struct veth_rq *rq = &priv->rq[i];
1098 
1099 		xdp_rxq_info_unreg(&rq->xdp_rxq);
1100 		if (!napi_already_on)
1101 			netif_napi_del(&rq->xdp_napi);
1102 	}
1103 
1104 	return err;
1105 }
1106 
1107 static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
1108 				   bool delete_napi)
1109 {
1110 	struct veth_priv *priv = netdev_priv(dev);
1111 	int i;
1112 
1113 	for (i = start; i < end; i++) {
1114 		struct veth_rq *rq = &priv->rq[i];
1115 
1116 		rq->xdp_rxq.mem = rq->xdp_mem;
1117 		xdp_rxq_info_unreg(&rq->xdp_rxq);
1118 
1119 		if (delete_napi)
1120 			netif_napi_del(&rq->xdp_napi);
1121 	}
1122 }
1123 
1124 static int veth_enable_xdp(struct net_device *dev)
1125 {
1126 	bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
1127 	struct veth_priv *priv = netdev_priv(dev);
1128 	int err, i;
1129 
1130 	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
1131 		err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
1132 		if (err)
1133 			return err;
1134 
1135 		if (!napi_already_on) {
1136 			err = __veth_napi_enable(dev);
1137 			if (err) {
1138 				veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
1139 				return err;
1140 			}
1141 		}
1142 	}
1143 
1144 	for (i = 0; i < dev->real_num_rx_queues; i++) {
1145 		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
1146 		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
1147 	}
1148 
1149 	return 0;
1150 }
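
/* veth_enable_xdp() shares NAPI ownership with GRO: if GRO already
 * brought NAPI up on an open device, attaching XDP only registers the rxq
 * info and reuses the existing NAPI; otherwise XDP adds and enables NAPI
 * itself. Conversely, veth_disable_xdp() keeps NAPI alive when GRO still
 * wants it.
 */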
1151 
1152 static void veth_disable_xdp(struct net_device *dev)
1153 {
1154 	struct veth_priv *priv = netdev_priv(dev);
1155 	int i;
1156 
1157 	for (i = 0; i < dev->real_num_rx_queues; i++)
1158 		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
1159 
1160 	if (!netif_running(dev) || !veth_gro_requested(dev))
1161 		veth_napi_del(dev);
1162 
1163 	veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
1164 }
1165 
1166 static int veth_napi_enable_range(struct net_device *dev, int start, int end)
1167 {
1168 	struct veth_priv *priv = netdev_priv(dev);
1169 	int err, i;
1170 
1171 	for (i = start; i < end; i++) {
1172 		struct veth_rq *rq = &priv->rq[i];
1173 
1174 		netif_napi_add(dev, &rq->xdp_napi, veth_poll);
1175 	}
1176 
1177 	err = __veth_napi_enable_range(dev, start, end);
1178 	if (err) {
1179 		for (i = start; i < end; i++) {
1180 			struct veth_rq *rq = &priv->rq[i];
1181 
1182 			netif_napi_del(&rq->xdp_napi);
1183 		}
1184 		return err;
1185 	}
1186 	return err;
1187 }
1188 
1189 static int veth_napi_enable(struct net_device *dev)
1190 {
1191 	return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1192 }
1193 
1194 static void veth_disable_range_safe(struct net_device *dev, int start, int end)
1195 {
1196 	struct veth_priv *priv = netdev_priv(dev);
1197 
1198 	if (start >= end)
1199 		return;
1200 
1201 	if (priv->_xdp_prog) {
1202 		veth_napi_del_range(dev, start, end);
1203 		veth_disable_xdp_range(dev, start, end, false);
1204 	} else if (veth_gro_requested(dev)) {
1205 		veth_napi_del_range(dev, start, end);
1206 	}
1207 }
1208 
1209 static int veth_enable_range_safe(struct net_device *dev, int start, int end)
1210 {
1211 	struct veth_priv *priv = netdev_priv(dev);
1212 	int err;
1213 
1214 	if (start >= end)
1215 		return 0;
1216 
1217 	if (priv->_xdp_prog) {
1218 		/* these channels are freshly initialized, napi is not on there even
1219 		 * when GRO is requested
1220 		 */
1221 		err = veth_enable_xdp_range(dev, start, end, false);
1222 		if (err)
1223 			return err;
1224 
1225 		err = __veth_napi_enable_range(dev, start, end);
1226 		if (err) {
1227 			/* on error always delete the newly added napis */
1228 			veth_disable_xdp_range(dev, start, end, true);
1229 			return err;
1230 		}
1231 	} else if (veth_gro_requested(dev)) {
1232 		return veth_napi_enable_range(dev, start, end);
1233 	}
1234 	return 0;
1235 }
1236 
1237 static void veth_set_xdp_features(struct net_device *dev)
1238 {
1239 	struct veth_priv *priv = netdev_priv(dev);
1240 	struct net_device *peer;
1241 
1242 	peer = rtnl_dereference(priv->peer);
1243 	if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
1244 		struct veth_priv *priv_peer = netdev_priv(peer);
1245 		xdp_features_t val = NETDEV_XDP_ACT_BASIC |
1246 				     NETDEV_XDP_ACT_REDIRECT |
1247 				     NETDEV_XDP_ACT_RX_SG;
1248 
1249 		if (priv_peer->_xdp_prog || veth_gro_requested(peer))
1250 			val |= NETDEV_XDP_ACT_NDO_XMIT |
1251 			       NETDEV_XDP_ACT_NDO_XMIT_SG;
1252 		xdp_set_features_flag(dev, val);
1253 	} else {
1254 		xdp_clear_features_flag(dev);
1255 	}
1256 }
1257 
1258 static int veth_set_channels(struct net_device *dev,
1259 			     struct ethtool_channels *ch)
1260 {
1261 	struct veth_priv *priv = netdev_priv(dev);
1262 	unsigned int old_rx_count, new_rx_count;
1263 	struct veth_priv *peer_priv;
1264 	struct net_device *peer;
1265 	int err;
1266 
1267 	/* sanity check. Upper bounds are already enforced by the caller */
1268 	if (!ch->rx_count || !ch->tx_count)
1269 		return -EINVAL;
1270 
1271 	/* avoid breaking XDP, if that is enabled */
1272 	peer = rtnl_dereference(priv->peer);
1273 	peer_priv = peer ? netdev_priv(peer) : NULL;
1274 	if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
1275 		return -EINVAL;
1276 
1277 	if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
1278 		return -EINVAL;
1279 
1280 	old_rx_count = dev->real_num_rx_queues;
1281 	new_rx_count = ch->rx_count;
1282 	if (netif_running(dev)) {
1283 		/* turn device off */
1284 		netif_carrier_off(dev);
1285 		if (peer)
1286 			netif_carrier_off(peer);
1287 
1288 		/* try to allocate new resources, as needed */
1289 		err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
1290 		if (err)
1291 			goto out;
1292 	}
1293 
1294 	err = netif_set_real_num_rx_queues(dev, ch->rx_count);
1295 	if (err)
1296 		goto revert;
1297 
1298 	err = netif_set_real_num_tx_queues(dev, ch->tx_count);
1299 	if (err) {
1300 		int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);
1301 
1302 		/* this error condition could happen only if rx and tx change
1303 		 * in opposite directions (e.g. tx nr rises, rx nr decreases)
1304 		 * and we can't do anything to fully restore the original
1305 		 * status
1306 		 */
1307 		if (err2)
1308 			pr_warn("Can't restore rx queues config %d -> %d %d\n",
1309 				new_rx_count, old_rx_count, err2);
1310 		else
1311 			goto revert;
1312 	}
1313 
1314 out:
1315 	if (netif_running(dev)) {
1316 		/* note that we need to swap the arguments WRT the enable part
1317 		 * to identify the range we have to disable
1318 		 */
1319 		veth_disable_range_safe(dev, new_rx_count, old_rx_count);
1320 		netif_carrier_on(dev);
1321 		if (peer)
1322 			netif_carrier_on(peer);
1323 	}
1324 
1325 	/* update XDP supported features */
1326 	veth_set_xdp_features(dev);
1327 	if (peer)
1328 		veth_set_xdp_features(peer);
1329 
1330 	return err;
1331 
1332 revert:
1333 	new_rx_count = old_rx_count;
1334 	old_rx_count = ch->rx_count;
1335 	goto out;
1336 }
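
/* From user space the channel counts are typically adjusted with
 * something like (illustrative invocation only):
 *
 *	ethtool -L veth0 rx 4 tx 4
 *
 * subject to the constraints checked in veth_set_channels() above: with
 * XDP attached, a device's rx queue count may not drop below its peer's
 * tx queue count.
 */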
1337 
1338 static int veth_open(struct net_device *dev)
1339 {
1340 	struct veth_priv *priv = netdev_priv(dev);
1341 	struct net_device *peer = rtnl_dereference(priv->peer);
1342 	int err;
1343 
1344 	if (!peer)
1345 		return -ENOTCONN;
1346 
1347 	if (priv->_xdp_prog) {
1348 		err = veth_enable_xdp(dev);
1349 		if (err)
1350 			return err;
1351 	} else if (veth_gro_requested(dev)) {
1352 		err = veth_napi_enable(dev);
1353 		if (err)
1354 			return err;
1355 	}
1356 
1357 	if (peer->flags & IFF_UP) {
1358 		netif_carrier_on(dev);
1359 		netif_carrier_on(peer);
1360 	}
1361 
1362 	veth_set_xdp_features(dev);
1363 
1364 	return 0;
1365 }
1366 
1367 static int veth_close(struct net_device *dev)
1368 {
1369 	struct veth_priv *priv = netdev_priv(dev);
1370 	struct net_device *peer = rtnl_dereference(priv->peer);
1371 
1372 	netif_carrier_off(dev);
1373 	if (peer)
1374 		netif_carrier_off(peer);
1375 
1376 	if (priv->_xdp_prog)
1377 		veth_disable_xdp(dev);
1378 	else if (veth_gro_requested(dev))
1379 		veth_napi_del(dev);
1380 
1381 	return 0;
1382 }
1383 
1384 static int is_valid_veth_mtu(int mtu)
1385 {
1386 	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
1387 }
1388 
1389 static int veth_alloc_queues(struct net_device *dev)
1390 {
1391 	struct veth_priv *priv = netdev_priv(dev);
1392 	int i;
1393 
1394 	priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
1395 			    GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
1396 	if (!priv->rq)
1397 		return -ENOMEM;
1398 
1399 	for (i = 0; i < dev->num_rx_queues; i++) {
1400 		priv->rq[i].dev = dev;
1401 		u64_stats_init(&priv->rq[i].stats.syncp);
1402 	}
1403 
1404 	return 0;
1405 }
1406 
1407 static void veth_free_queues(struct net_device *dev)
1408 {
1409 	struct veth_priv *priv = netdev_priv(dev);
1410 
1411 	kvfree(priv->rq);
1412 }
1413 
1414 static int veth_dev_init(struct net_device *dev)
1415 {
1416 	netdev_lockdep_set_classes(dev);
1417 	return veth_alloc_queues(dev);
1418 }
1419 
1420 static void veth_dev_free(struct net_device *dev)
1421 {
1422 	veth_free_queues(dev);
1423 }
1424 
1425 #ifdef CONFIG_NET_POLL_CONTROLLER
1426 static void veth_poll_controller(struct net_device *dev)
1427 {
1428 	/* veth only receives frames when its peer sends one
1429 	 * Since it has nothing to do with disabling irqs, we are guaranteed
1430 	 * never to have pending data when we poll for it so
1431 	 * there is nothing to do here.
1432 	 *
1433 	 * We need this though so netpoll recognizes us as an interface that
1434 	 * supports polling, which enables bridge devices in virt setups to
1435 	 * still use netconsole
1436 	 */
1437 }
1438 #endif	/* CONFIG_NET_POLL_CONTROLLER */
1439 
1440 static int veth_get_iflink(const struct net_device *dev)
1441 {
1442 	struct veth_priv *priv = netdev_priv(dev);
1443 	struct net_device *peer;
1444 	int iflink;
1445 
1446 	rcu_read_lock();
1447 	peer = rcu_dereference(priv->peer);
1448 	iflink = peer ? READ_ONCE(peer->ifindex) : 0;
1449 	rcu_read_unlock();
1450 
1451 	return iflink;
1452 }
1453 
1454 static netdev_features_t veth_fix_features(struct net_device *dev,
1455 					   netdev_features_t features)
1456 {
1457 	struct veth_priv *priv = netdev_priv(dev);
1458 	struct net_device *peer;
1459 
1460 	peer = rtnl_dereference(priv->peer);
1461 	if (peer) {
1462 		struct veth_priv *peer_priv = netdev_priv(peer);
1463 
1464 		if (peer_priv->_xdp_prog)
1465 			features &= ~NETIF_F_GSO_SOFTWARE;
1466 	}
1467 	if (priv->_xdp_prog)
1468 		features |= NETIF_F_GRO;
1469 
1470 	return features;
1471 }
1472 
1473 static int veth_set_features(struct net_device *dev,
1474 			     netdev_features_t features)
1475 {
1476 	netdev_features_t changed = features ^ dev->features;
1477 	struct veth_priv *priv = netdev_priv(dev);
1478 	struct net_device *peer;
1479 	int err;
1480 
1481 	if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
1482 		return 0;
1483 
1484 	peer = rtnl_dereference(priv->peer);
1485 	if (features & NETIF_F_GRO) {
1486 		err = veth_napi_enable(dev);
1487 		if (err)
1488 			return err;
1489 
1490 		if (peer)
1491 			xdp_features_set_redirect_target(peer, true);
1492 	} else {
1493 		if (peer)
1494 			xdp_features_clear_redirect_target(peer);
1495 		veth_napi_del(dev);
1496 	}
1497 	return 0;
1498 }
1499 
1500 static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
1501 {
1502 	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
1503 	struct net_device *peer;
1504 
1505 	if (new_hr < 0)
1506 		new_hr = 0;
1507 
1508 	rcu_read_lock();
1509 	peer = rcu_dereference(priv->peer);
1510 	if (unlikely(!peer))
1511 		goto out;
1512 
1513 	peer_priv = netdev_priv(peer);
1514 	priv->requested_headroom = new_hr;
1515 	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
1516 	dev->needed_headroom = new_hr;
1517 	peer->needed_headroom = new_hr;
1518 
1519 out:
1520 	rcu_read_unlock();
1521 }
1522 
1523 static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1524 			struct netlink_ext_ack *extack)
1525 {
1526 	struct veth_priv *priv = netdev_priv(dev);
1527 	struct bpf_prog *old_prog;
1528 	struct net_device *peer;
1529 	unsigned int max_mtu;
1530 	int err;
1531 
1532 	old_prog = priv->_xdp_prog;
1533 	priv->_xdp_prog = prog;
1534 	peer = rtnl_dereference(priv->peer);
1535 
1536 	if (prog) {
1537 		if (!peer) {
1538 			NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1539 			err = -ENOTCONN;
1540 			goto err;
1541 		}
1542 
1543 		max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
1544 			  peer->hard_header_len;
1545 		/* Allow increasing the max_mtu if the program supports
1546 		 * XDP fragments.
1547 		 */
1548 		if (prog->aux->xdp_has_frags)
1549 			max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
1550 
1551 		if (peer->mtu > max_mtu) {
1552 			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1553 			err = -ERANGE;
1554 			goto err;
1555 		}
1556 
1557 		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1558 			NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1559 			err = -ENOSPC;
1560 			goto err;
1561 		}
1562 
1563 		if (dev->flags & IFF_UP) {
1564 			err = veth_enable_xdp(dev);
1565 			if (err) {
1566 				NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1567 				goto err;
1568 			}
1569 		}
1570 
1571 		if (!old_prog) {
1572 			if (!veth_gro_requested(dev)) {
1573 				/* user-space did not require GRO, but adding
1574 				 * XDP is supposed to get GRO working
1575 				 */
1576 				dev->features |= NETIF_F_GRO;
1577 				netdev_features_change(dev);
1578 			}
1579 
1580 			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1581 			peer->max_mtu = max_mtu;
1582 		}
1583 
1584 		xdp_features_set_redirect_target(peer, true);
1585 	}
1586 
1587 	if (old_prog) {
1588 		if (!prog) {
1589 			if (peer && !veth_gro_requested(dev))
1590 				xdp_features_clear_redirect_target(peer);
1591 
1592 			if (dev->flags & IFF_UP)
1593 				veth_disable_xdp(dev);
1594 
1595 			/* if user-space did not require GRO, since adding XDP
1596 			 * enabled it, clear it now
1597 			 */
1598 			if (!veth_gro_requested(dev)) {
1599 				dev->features &= ~NETIF_F_GRO;
1600 				netdev_features_change(dev);
1601 			}
1602 
1603 			if (peer) {
1604 				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1605 				peer->max_mtu = ETH_MAX_MTU;
1606 			}
1607 		}
1608 		bpf_prog_put(old_prog);
1609 	}
1610 
1611 	if ((!!old_prog ^ !!prog) && peer)
1612 		netdev_update_features(peer);
1613 
1614 	return 0;
1615 err:
1616 	priv->_xdp_prog = old_prog;
1617 
1618 	return err;
1619 }
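
/* A program is usually attached from user space via the netlink XDP
 * attribute, e.g. with iproute2 (illustrative invocation only):
 *
 *	ip link set dev veth0 xdp obj prog.o sec xdp
 *
 * which reaches veth_xdp_set() through ndo_bpf/XDP_SETUP_PROG. Detaching
 * ("xdp off") passes prog == NULL and takes the old_prog teardown branch.
 */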
1620 
1621 static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1622 {
1623 	switch (xdp->command) {
1624 	case XDP_SETUP_PROG:
1625 		return veth_xdp_set(dev, xdp->prog, xdp->extack);
1626 	default:
1627 		return -EINVAL;
1628 	}
1629 }
1630 
1631 static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
1632 {
1633 	struct veth_xdp_buff *_ctx = (void *)ctx;
1634 
1635 	if (!_ctx->skb)
1636 		return -ENODATA;
1637 
1638 	*timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
1639 	return 0;
1640 }
1641 
1642 static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
1643 			    enum xdp_rss_hash_type *rss_type)
1644 {
1645 	struct veth_xdp_buff *_ctx = (void *)ctx;
1646 	struct sk_buff *skb = _ctx->skb;
1647 
1648 	if (!skb)
1649 		return -ENODATA;
1650 
1651 	*hash = skb_get_hash(skb);
1652 	*rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE;
1653 
1654 	return 0;
1655 }
1656 
1657 static int veth_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
1658 				u16 *vlan_tci)
1659 {
1660 	const struct veth_xdp_buff *_ctx = (void *)ctx;
1661 	const struct sk_buff *skb = _ctx->skb;
1662 	int err;
1663 
1664 	if (!skb)
1665 		return -ENODATA;
1666 
1667 	err = __vlan_hwaccel_get_tag(skb, vlan_tci);
1668 	if (err)
1669 		return err;
1670 
1671 	*vlan_proto = skb->vlan_proto;
1672 	return err;
1673 }
1674 
1675 static const struct net_device_ops veth_netdev_ops = {
1676 	.ndo_init            = veth_dev_init,
1677 	.ndo_open            = veth_open,
1678 	.ndo_stop            = veth_close,
1679 	.ndo_start_xmit      = veth_xmit,
1680 	.ndo_get_stats64     = veth_get_stats64,
1681 	.ndo_set_rx_mode     = veth_set_multicast_list,
1682 	.ndo_set_mac_address = eth_mac_addr,
1683 #ifdef CONFIG_NET_POLL_CONTROLLER
1684 	.ndo_poll_controller	= veth_poll_controller,
1685 #endif
1686 	.ndo_get_iflink		= veth_get_iflink,
1687 	.ndo_fix_features	= veth_fix_features,
1688 	.ndo_set_features	= veth_set_features,
1689 	.ndo_features_check	= passthru_features_check,
1690 	.ndo_set_rx_headroom	= veth_set_rx_headroom,
1691 	.ndo_bpf		= veth_xdp,
1692 	.ndo_xdp_xmit		= veth_ndo_xdp_xmit,
1693 	.ndo_get_peer_dev	= veth_peer_dev,
1694 };
1695 
1696 static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
1697 	.xmo_rx_timestamp		= veth_xdp_rx_timestamp,
1698 	.xmo_rx_hash			= veth_xdp_rx_hash,
1699 	.xmo_rx_vlan_tag		= veth_xdp_rx_vlan_tag,
1700 };
1701 
1702 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
1703 		       NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
1704 		       NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
1705 		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1706 		       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
1707 
1708 static void veth_setup(struct net_device *dev)
1709 {
1710 	ether_setup(dev);
1711 
1712 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1713 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1714 	dev->priv_flags |= IFF_NO_QUEUE;
1715 	dev->priv_flags |= IFF_PHONY_HEADROOM;
1716 
1717 	dev->netdev_ops = &veth_netdev_ops;
1718 	dev->xdp_metadata_ops = &veth_xdp_metadata_ops;
1719 	dev->ethtool_ops = &veth_ethtool_ops;
1720 	dev->features |= NETIF_F_LLTX;
1721 	dev->features |= VETH_FEATURES;
1722 	dev->vlan_features = dev->features &
1723 			     ~(NETIF_F_HW_VLAN_CTAG_TX |
1724 			       NETIF_F_HW_VLAN_STAG_TX |
1725 			       NETIF_F_HW_VLAN_CTAG_RX |
1726 			       NETIF_F_HW_VLAN_STAG_RX);
1727 	dev->needs_free_netdev = true;
1728 	dev->priv_destructor = veth_dev_free;
1729 	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1730 	dev->max_mtu = ETH_MAX_MTU;
1731 
1732 	dev->hw_features = VETH_FEATURES;
1733 	dev->hw_enc_features = VETH_FEATURES;
1734 	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
1735 	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
1736 }
1737 
1738 /*
1739  * netlink interface
1740  */
1741 
1742 static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
1743 			 struct netlink_ext_ack *extack)
1744 {
1745 	if (tb[IFLA_ADDRESS]) {
1746 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1747 			return -EINVAL;
1748 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1749 			return -EADDRNOTAVAIL;
1750 	}
1751 	if (tb[IFLA_MTU]) {
1752 		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
1753 			return -EINVAL;
1754 	}
1755 	return 0;
1756 }
1757 
1758 static struct rtnl_link_ops veth_link_ops;
1759 
1760 static void veth_disable_gro(struct net_device *dev)
1761 {
1762 	dev->features &= ~NETIF_F_GRO;
1763 	dev->wanted_features &= ~NETIF_F_GRO;
1764 	netdev_update_features(dev);
1765 }
1766 
1767 static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
1768 {
1769 	int err;
1770 
1771 	if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
1772 		err = netif_set_real_num_tx_queues(dev, 1);
1773 		if (err)
1774 			return err;
1775 	}
1776 	if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
1777 		err = netif_set_real_num_rx_queues(dev, 1);
1778 		if (err)
1779 			return err;
1780 	}
1781 	return 0;
1782 }
1783 
1784 static int veth_newlink(struct net *src_net, struct net_device *dev,
1785 			struct nlattr *tb[], struct nlattr *data[],
1786 			struct netlink_ext_ack *extack)
1787 {
1788 	int err;
1789 	struct net_device *peer;
1790 	struct veth_priv *priv;
1791 	char ifname[IFNAMSIZ];
1792 	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
1793 	unsigned char name_assign_type;
1794 	struct ifinfomsg *ifmp;
1795 	struct net *net;
1796 
1797 	/*
1798 	 * create and register peer first
1799 	 */
1800 	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
1801 		struct nlattr *nla_peer;
1802 
1803 		nla_peer = data[VETH_INFO_PEER];
1804 		ifmp = nla_data(nla_peer);
1805 		err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
1806 		if (err < 0)
1807 			return err;
1808 
1809 		err = veth_validate(peer_tb, NULL, extack);
1810 		if (err < 0)
1811 			return err;
1812 
1813 		tbp = peer_tb;
1814 	} else {
1815 		ifmp = NULL;
1816 		tbp = tb;
1817 	}
1818 
1819 	if (ifmp && tbp[IFLA_IFNAME]) {
1820 		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
1821 		name_assign_type = NET_NAME_USER;
1822 	} else {
1823 		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
1824 		name_assign_type = NET_NAME_ENUM;
1825 	}
1826 
1827 	net = rtnl_link_get_net(src_net, tbp);
1828 	if (IS_ERR(net))
1829 		return PTR_ERR(net);
1830 
1831 	peer = rtnl_create_link(net, ifname, name_assign_type,
1832 				&veth_link_ops, tbp, extack);
1833 	if (IS_ERR(peer)) {
1834 		put_net(net);
1835 		return PTR_ERR(peer);
1836 	}
1837 
1838 	if (!ifmp || !tbp[IFLA_ADDRESS])
1839 		eth_hw_addr_random(peer);
1840 
1841 	if (ifmp && (dev->ifindex != 0))
1842 		peer->ifindex = ifmp->ifi_index;
1843 
1844 	netif_inherit_tso_max(peer, dev);
1845 
1846 	err = register_netdevice(peer);
1847 	put_net(net);
1848 	net = NULL;
1849 	if (err < 0)
1850 		goto err_register_peer;
1851 
1852 	/* keep GRO disabled by default to be consistent with the established
1853 	 * veth behavior
1854 	 */
1855 	veth_disable_gro(peer);
1856 	netif_carrier_off(peer);
1857 
1858 	err = rtnl_configure_link(peer, ifmp, 0, NULL);
1859 	if (err < 0)
1860 		goto err_configure_peer;
1861 
1862 	/*
1863 	 * register dev last
1864 	 *
1865 	 * note that, since we've registered a new device, the dev's name
1866 	 * should be re-allocated
1867 	 */
1868 
1869 	if (tb[IFLA_ADDRESS] == NULL)
1870 		eth_hw_addr_random(dev);
1871 
1872 	if (tb[IFLA_IFNAME])
1873 		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
1874 	else
1875 		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
1876 
1877 	err = register_netdevice(dev);
1878 	if (err < 0)
1879 		goto err_register_dev;
1880 
1881 	netif_carrier_off(dev);
1882 
1883 	/*
1884 	 * tie the devices together
1885 	 */
1886 
1887 	priv = netdev_priv(dev);
1888 	rcu_assign_pointer(priv->peer, peer);
1889 	err = veth_init_queues(dev, tb);
1890 	if (err)
1891 		goto err_queues;
1892 
1893 	priv = netdev_priv(peer);
1894 	rcu_assign_pointer(priv->peer, dev);
1895 	err = veth_init_queues(peer, tb);
1896 	if (err)
1897 		goto err_queues;
1898 
1899 	veth_disable_gro(dev);
1900 	/* update XDP supported features */
1901 	veth_set_xdp_features(dev);
1902 	veth_set_xdp_features(peer);
1903 
1904 	return 0;
1905 
1906 err_queues:
1907 	unregister_netdevice(dev);
1908 err_register_dev:
1909 	/* nothing to do */
1910 err_configure_peer:
1911 	unregister_netdevice(peer);
1912 	return err;
1913 
1914 err_register_peer:
1915 	free_netdev(peer);
1916 	return err;
1917 }
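
/* A veth pair is normally created from user space with something like
 * (illustrative invocation only):
 *
 *	ip link add veth0 type veth peer name veth1
 *
 * optionally placing the peer in another namespace via "peer ... netns".
 * The VETH_INFO_PEER attribute parsed in veth_newlink() above carries the
 * peer's ifinfomsg and link attributes.
 */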
1918 
1919 static void veth_dellink(struct net_device *dev, struct list_head *head)
1920 {
1921 	struct veth_priv *priv;
1922 	struct net_device *peer;
1923 
1924 	priv = netdev_priv(dev);
1925 	peer = rtnl_dereference(priv->peer);
1926 
1927 	/* Note : dellink() is called from default_device_exit_batch(),
1928 	 * before a rcu_synchronize() point. The devices are guaranteed
1929 	 * not being freed before one RCU grace period.
1930 	 */
1931 	RCU_INIT_POINTER(priv->peer, NULL);
1932 	unregister_netdevice_queue(dev, head);
1933 
1934 	if (peer) {
1935 		priv = netdev_priv(peer);
1936 		RCU_INIT_POINTER(priv->peer, NULL);
1937 		unregister_netdevice_queue(peer, head);
1938 	}
1939 }
1940 
1941 static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1942 	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
1943 };
1944 
1945 static struct net *veth_get_link_net(const struct net_device *dev)
1946 {
1947 	struct veth_priv *priv = netdev_priv(dev);
1948 	struct net_device *peer = rtnl_dereference(priv->peer);
1949 
1950 	return peer ? dev_net(peer) : dev_net(dev);
1951 }
1952 
1953 static unsigned int veth_get_num_queues(void)
1954 {
1955 	/* enforce the same queue limit as rtnl_create_link */
1956 	int queues = num_possible_cpus();
1957 
1958 	if (queues > 4096)
1959 		queues = 4096;
1960 	return queues;
1961 }
1962 
1963 static struct rtnl_link_ops veth_link_ops = {
1964 	.kind		= DRV_NAME,
1965 	.priv_size	= sizeof(struct veth_priv),
1966 	.setup		= veth_setup,
1967 	.validate	= veth_validate,
1968 	.newlink	= veth_newlink,
1969 	.dellink	= veth_dellink,
1970 	.policy		= veth_policy,
1971 	.maxtype	= VETH_INFO_MAX,
1972 	.get_link_net	= veth_get_link_net,
1973 	.get_num_tx_queues	= veth_get_num_queues,
1974 	.get_num_rx_queues	= veth_get_num_queues,
1975 };
1976 
1977 /*
1978  * init/fini
1979  */
1980 
1981 static __init int veth_init(void)
1982 {
1983 	return rtnl_link_register(&veth_link_ops);
1984 }
1985 
1986 static __exit void veth_exit(void)
1987 {
1988 	rtnl_link_unregister(&veth_link_ops);
1989 }
1990 
1991 module_init(veth_init);
1992 module_exit(veth_exit);
1993 
1994 MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
1995 MODULE_LICENSE("GPL v2");
1996 MODULE_ALIAS_RTNL_LINK(DRV_NAME);
1997