1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * drivers/net/veth.c
4 *
5 * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
6 *
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
9 *
10 */
11
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/ethtool.h>
15 #include <linux/etherdevice.h>
16 #include <linux/u64_stats_sync.h>
17
18 #include <net/rtnetlink.h>
19 #include <net/dst.h>
20 #include <net/netdev_lock.h>
21 #include <net/xfrm.h>
22 #include <net/xdp.h>
23 #include <linux/veth.h>
24 #include <linux/module.h>
25 #include <linux/bpf.h>
26 #include <linux/filter.h>
27 #include <linux/ptr_ring.h>
28 #include <linux/bpf_trace.h>
29 #include <linux/net_tstamp.h>
30 #include <linux/skbuff_ref.h>
31 #include <net/page_pool/helpers.h>
32
33 #define DRV_NAME "veth"
34 #define DRV_VERSION "1.0"
35
36 #define VETH_XDP_FLAG BIT(0)
37 #define VETH_RING_SIZE 256
38 #define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
39
40 #define VETH_XDP_TX_BULK_SIZE 16
41 #define VETH_XDP_BATCH 16
42
43 struct veth_stats {
44 u64 rx_drops;
45 /* xdp */
46 u64 xdp_packets;
47 u64 xdp_bytes;
48 u64 xdp_redirect;
49 u64 xdp_drops;
50 u64 xdp_tx;
51 u64 xdp_tx_err;
52 u64 peer_tq_xdp_xmit;
53 u64 peer_tq_xdp_xmit_err;
54 };
55
56 struct veth_rq_stats {
57 struct veth_stats vs;
58 struct u64_stats_sync syncp;
59 };
60
61 struct veth_rq {
62 struct napi_struct xdp_napi;
63 struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
64 struct net_device *dev;
65 struct bpf_prog __rcu *xdp_prog;
66 struct xdp_mem_info xdp_mem;
67 struct veth_rq_stats stats;
68 bool rx_notify_masked;
69 struct ptr_ring xdp_ring;
70 struct xdp_rxq_info xdp_rxq;
71 struct page_pool *page_pool;
72 };
73
74 struct veth_priv {
75 struct net_device __rcu *peer;
76 atomic64_t dropped;
77 struct bpf_prog *_xdp_prog;
78 struct veth_rq *rq;
79 unsigned int requested_headroom;
80 };
81
82 struct veth_xdp_tx_bq {
83 struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
84 unsigned int count;
85 };
86
87 /*
88 * ethtool interface
89 */
90
91 struct veth_q_stat_desc {
92 char desc[ETH_GSTRING_LEN];
93 size_t offset;
94 };
95
96 #define VETH_RQ_STAT(m) offsetof(struct veth_stats, m)
97
98 static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
99 { "xdp_packets", VETH_RQ_STAT(xdp_packets) },
100 { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
101 { "drops", VETH_RQ_STAT(rx_drops) },
102 { "xdp_redirect", VETH_RQ_STAT(xdp_redirect) },
103 { "xdp_drops", VETH_RQ_STAT(xdp_drops) },
104 { "xdp_tx", VETH_RQ_STAT(xdp_tx) },
105 { "xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) },
106 };
107
108 #define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
109
110 static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
111 { "xdp_xmit", VETH_RQ_STAT(peer_tq_xdp_xmit) },
112 { "xdp_xmit_errors", VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
113 };
114
115 #define VETH_TQ_STATS_LEN ARRAY_SIZE(veth_tq_stats_desc)
116
117 static struct {
118 const char string[ETH_GSTRING_LEN];
119 } ethtool_stats_keys[] = {
120 { "peer_ifindex" },
121 };
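/* A sketch (hypothetical values) of the resulting `ethtool -S veth0`
 * layout, as assembled by veth_get_strings() and veth_get_ethtool_stats()
 * below:
 *
 *   peer_ifindex: 5
 *   rx_queue_0_xdp_packets: 120
 *   rx_queue_0_xdp_bytes: 9600
 *   rx_queue_0_drops: 0
 *   ...
 *   tx_queue_0_xdp_xmit: 0
 *   tx_queue_0_xdp_xmit_errors: 0
 *   <page pool counters>
 */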
122
123 struct veth_xdp_buff {
124 struct xdp_buff xdp;
125 struct sk_buff *skb;
126 };
127
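/* veth has no physical link, so the link settings reported below are
 * synthetic constants: `ethtool veth0` (hypothetical name) always shows
 * 10000Mb/s, full duplex, autoneg off.
 */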
128 static int veth_get_link_ksettings(struct net_device *dev,
129 struct ethtool_link_ksettings *cmd)
130 {
131 cmd->base.speed = SPEED_10000;
132 cmd->base.duplex = DUPLEX_FULL;
133 cmd->base.port = PORT_TP;
134 cmd->base.autoneg = AUTONEG_DISABLE;
135 return 0;
136 }
137
138 static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
139 {
140 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
141 strscpy(info->version, DRV_VERSION, sizeof(info->version));
142 }
143
144 static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
145 {
146 u8 *p = buf;
147 int i, j;
148
149 switch (stringset) {
150 case ETH_SS_STATS:
151 memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
152 p += sizeof(ethtool_stats_keys);
153 for (i = 0; i < dev->real_num_rx_queues; i++)
154 for (j = 0; j < VETH_RQ_STATS_LEN; j++)
155 ethtool_sprintf(&p, "rx_queue_%u_%.18s",
156 i, veth_rq_stats_desc[j].desc);
157
158 for (i = 0; i < dev->real_num_tx_queues; i++)
159 for (j = 0; j < VETH_TQ_STATS_LEN; j++)
160 ethtool_sprintf(&p, "tx_queue_%u_%.18s",
161 i, veth_tq_stats_desc[j].desc);
162
163 page_pool_ethtool_stats_get_strings(p);
164 break;
165 }
166 }
167
168 static int veth_get_sset_count(struct net_device *dev, int sset)
169 {
170 switch (sset) {
171 case ETH_SS_STATS:
172 return ARRAY_SIZE(ethtool_stats_keys) +
173 VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
174 VETH_TQ_STATS_LEN * dev->real_num_tx_queues +
175 page_pool_ethtool_stats_get_count();
176 default:
177 return -EOPNOTSUPP;
178 }
179 }
180
181 static void veth_get_page_pool_stats(struct net_device *dev, u64 *data)
182 {
183 #ifdef CONFIG_PAGE_POOL_STATS
184 struct veth_priv *priv = netdev_priv(dev);
185 struct page_pool_stats pp_stats = {};
186 int i;
187
188 for (i = 0; i < dev->real_num_rx_queues; i++) {
189 if (!priv->rq[i].page_pool)
190 continue;
191 page_pool_get_stats(priv->rq[i].page_pool, &pp_stats);
192 }
193 page_pool_ethtool_stats_get(data, &pp_stats);
194 #endif /* CONFIG_PAGE_POOL_STATS */
195 }
196
197 static void veth_get_ethtool_stats(struct net_device *dev,
198 struct ethtool_stats *stats, u64 *data)
199 {
200 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
201 struct net_device *peer = rtnl_dereference(priv->peer);
202 int i, j, idx, pp_idx;
203
204 data[0] = peer ? peer->ifindex : 0;
205 idx = 1;
206 for (i = 0; i < dev->real_num_rx_queues; i++) {
207 const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
208 const void *stats_base = (void *)&rq_stats->vs;
209 unsigned int start;
210 size_t offset;
211
212 do {
213 start = u64_stats_fetch_begin(&rq_stats->syncp);
214 for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
215 offset = veth_rq_stats_desc[j].offset;
216 data[idx + j] = *(u64 *)(stats_base + offset);
217 }
218 } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
219 idx += VETH_RQ_STATS_LEN;
220 }
221 pp_idx = idx;
222
223 if (!peer)
224 goto page_pool_stats;
225
226 rcv_priv = netdev_priv(peer);
227 for (i = 0; i < peer->real_num_rx_queues; i++) {
228 const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
229 const void *base = (void *)&rq_stats->vs;
230 unsigned int start, tx_idx = idx;
231 size_t offset;
232
233 tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
234 do {
235 start = u64_stats_fetch_begin(&rq_stats->syncp);
236 for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
237 offset = veth_tq_stats_desc[j].offset;
238 data[tx_idx + j] += *(u64 *)(base + offset);
239 }
240 } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
241 }
242 pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;
243
244 page_pool_stats:
245 veth_get_page_pool_stats(dev, &data[pp_idx]);
246 }
247
248 static void veth_get_channels(struct net_device *dev,
249 struct ethtool_channels *channels)
250 {
251 channels->tx_count = dev->real_num_tx_queues;
252 channels->rx_count = dev->real_num_rx_queues;
253 channels->max_tx = dev->num_tx_queues;
254 channels->max_rx = dev->num_rx_queues;
255 }
256
257 static int veth_set_channels(struct net_device *dev,
258 struct ethtool_channels *ch);
259
260 static const struct ethtool_ops veth_ethtool_ops = {
261 .get_drvinfo = veth_get_drvinfo,
262 .get_link = ethtool_op_get_link,
263 .get_strings = veth_get_strings,
264 .get_sset_count = veth_get_sset_count,
265 .get_ethtool_stats = veth_get_ethtool_stats,
266 .get_link_ksettings = veth_get_link_ksettings,
267 .get_ts_info = ethtool_op_get_ts_info,
268 .get_channels = veth_get_channels,
269 .set_channels = veth_set_channels,
270 };
271
272 /* general routines */
273
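/* sk_buffs and xdp_frames share the same per-queue ptr_ring. An
 * xdp_frame pointer is tagged by setting VETH_XDP_FLAG (bit 0, always
 * clear in a valid kernel pointer), e.g. (hypothetical address):
 *
 *   void *ptr = veth_xdp_to_ptr(frame);  // 0x...880 becomes 0x...881
 *   veth_is_xdp_frame(ptr);              // true: low bit is set
 *   veth_ptr_to_xdp(ptr);                // masks the bit off again
 */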
274 static bool veth_is_xdp_frame(void *ptr)
275 {
276 return (unsigned long)ptr & VETH_XDP_FLAG;
277 }
278
279 static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
280 {
281 return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
282 }
283
284 static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
285 {
286 return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
287 }
288
289 static void veth_ptr_free(void *ptr)
290 {
291 if (veth_is_xdp_frame(ptr))
292 xdp_return_frame(veth_ptr_to_xdp(ptr));
293 else
294 kfree_skb(ptr);
295 }
296
297 static void __veth_xdp_flush(struct veth_rq *rq)
298 {
299 /* Write ptr_ring before reading rx_notify_masked */
300 smp_mb();
301 if (!READ_ONCE(rq->rx_notify_masked) &&
302 napi_schedule_prep(&rq->xdp_napi)) {
303 WRITE_ONCE(rq->rx_notify_masked, true);
304 __napi_schedule(&rq->xdp_napi);
305 }
306 }
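/* The smp_mb() above pairs with the smp_store_mb() in veth_poll(): the
 * producer orders its ptr_ring write before reading rx_notify_masked,
 * and the consumer clears rx_notify_masked before re-checking the ring,
 * so a wakeup cannot be lost between the two.
 */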
307
308 static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
309 {
310 if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb)))
311 return NETDEV_TX_BUSY; /* signal qdisc layer */
312
313 return NET_RX_SUCCESS; /* same as NETDEV_TX_OK */
314 }
315
316 static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
317 struct veth_rq *rq, bool xdp)
318 {
319 return __dev_forward_skb(dev, skb) ?: xdp ?
320 veth_xdp_rx(rq, skb) :
321 __netif_rx(skb);
322 }
323
324 /* Return true if the specified skb has a chance of GRO aggregation.
325 * Don't strive for accuracy, but try to avoid GRO overhead in the most
326 * common scenarios.
327 * When XDP is enabled, all traffic is considered eligible, as the xmit
328 * device has TSO off.
329 * When TSO is enabled on the xmit device, we are likely interested only
330 * in UDP aggregation, so explicitly check for that if the skb is suspected
331 * to belong to locally generated UDP traffic (the sock_wfree destructor
332 * is used by UDP, ICMP and XDP sockets).
333 */
334 static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
335 const struct net_device *rcv,
336 const struct sk_buff *skb)
337 {
338 return !(dev->features & NETIF_F_ALL_TSO) ||
339 (skb->destructor == sock_wfree &&
340 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
341 }
342
343 static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
344 {
345 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
346 struct veth_rq *rq = NULL;
347 struct netdev_queue *txq;
348 struct net_device *rcv;
349 int length = skb->len;
350 bool use_napi = false;
351 int ret, rxq;
352
353 rcu_read_lock();
354 rcv = rcu_dereference(priv->peer);
355 if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
356 kfree_skb(skb);
357 goto drop;
358 }
359
360 rcv_priv = netdev_priv(rcv);
361 rxq = skb_get_queue_mapping(skb);
362 if (rxq < rcv->real_num_rx_queues) {
363 rq = &rcv_priv->rq[rxq];
364
365 /* The napi pointer is available when an XDP program is
366 * attached or when GRO is enabled.
367 * Don't bother with napi/GRO if the skb can't be aggregated.
368 */
369 use_napi = rcu_access_pointer(rq->napi) &&
370 veth_skb_is_eligible_for_gro(dev, rcv, skb);
371 }
372
373 skb_tx_timestamp(skb);
374
375 ret = veth_forward_skb(rcv, skb, rq, use_napi);
376 switch (ret) {
377 case NET_RX_SUCCESS: /* same as NETDEV_TX_OK */
378 if (!use_napi)
379 dev_sw_netstats_tx_add(dev, 1, length);
380 else
381 __veth_xdp_flush(rq);
382 break;
383 case NETDEV_TX_BUSY:
384 /* If a qdisc is attached to our virtual device, returning
385 * NETDEV_TX_BUSY is allowed.
386 */
387 txq = netdev_get_tx_queue(dev, rxq);
388
389 if (qdisc_txq_has_no_queue(txq)) {
390 dev_kfree_skb_any(skb);
391 goto drop;
392 }
393 /* Restore Eth hdr pulled by dev_forward_skb/eth_type_trans */
394 __skb_push(skb, ETH_HLEN);
395 /* Rely on prior successful packets having started the NAPI
396 * consumer via __veth_xdp_flush(). Cancel the TXQ stop if the
397 * consumer stopped, paired with the empty check in veth_poll().
398 */
399 netif_tx_stop_queue(txq);
400 smp_mb__after_atomic();
401 if (unlikely(__ptr_ring_empty(&rq->xdp_ring)))
402 netif_tx_wake_queue(txq);
403 break;
404 case NET_RX_DROP: /* same as NET_XMIT_DROP */
405 drop:
406 atomic64_inc(&priv->dropped);
407 ret = NET_XMIT_DROP;
408 break;
409 default:
410 net_crit_ratelimited("%s(%s): Invalid return code(%d)",
411 __func__, dev->name, ret);
412 }
413 rcu_read_unlock();
414
415 return ret;
416 }
417
418 static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
419 {
420 struct veth_priv *priv = netdev_priv(dev);
421 int i;
422
423 result->peer_tq_xdp_xmit_err = 0;
424 result->xdp_packets = 0;
425 result->xdp_tx_err = 0;
426 result->xdp_bytes = 0;
427 result->rx_drops = 0;
428 for (i = 0; i < dev->num_rx_queues; i++) {
429 u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
430 struct veth_rq_stats *stats = &priv->rq[i].stats;
431 unsigned int start;
432
433 do {
434 start = u64_stats_fetch_begin(&stats->syncp);
435 peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
436 xdp_tx_err = stats->vs.xdp_tx_err;
437 packets = stats->vs.xdp_packets;
438 bytes = stats->vs.xdp_bytes;
439 drops = stats->vs.rx_drops;
440 } while (u64_stats_fetch_retry(&stats->syncp, start));
441 result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
442 result->xdp_tx_err += xdp_tx_err;
443 result->xdp_packets += packets;
444 result->xdp_bytes += bytes;
445 result->rx_drops += drops;
446 }
447 }
448
449 static void veth_get_stats64(struct net_device *dev,
450 struct rtnl_link_stats64 *tot)
451 {
452 struct veth_priv *priv = netdev_priv(dev);
453 struct net_device *peer;
454 struct veth_stats rx;
455
456 tot->tx_dropped = atomic64_read(&priv->dropped);
457 dev_fetch_sw_netstats(tot, dev->tstats);
458
459 veth_stats_rx(&rx, dev);
460 tot->tx_dropped += rx.xdp_tx_err;
461 tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
462 tot->rx_bytes += rx.xdp_bytes;
463 tot->rx_packets += rx.xdp_packets;
464
465 rcu_read_lock();
466 peer = rcu_dereference(priv->peer);
467 if (peer) {
468 struct rtnl_link_stats64 tot_peer = {};
469
470 dev_fetch_sw_netstats(&tot_peer, peer->tstats);
471 tot->rx_bytes += tot_peer.tx_bytes;
472 tot->rx_packets += tot_peer.tx_packets;
473
474 veth_stats_rx(&rx, peer);
475 tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
476 tot->rx_dropped += rx.xdp_tx_err;
477 tot->tx_bytes += rx.xdp_bytes;
478 tot->tx_packets += rx.xdp_packets;
479 }
480 rcu_read_unlock();
481 }
482
483 /* fake multicast ability */
484 static void veth_set_multicast_list(struct net_device *dev)
485 {
486 }
487
488 static int veth_select_rxq(struct net_device *dev)
489 {
490 return smp_processor_id() % dev->real_num_rx_queues;
491 }
492
493 static struct net_device *veth_peer_dev(struct net_device *dev)
494 {
495 struct veth_priv *priv = netdev_priv(dev);
496
497 /* Callers must be under RCU read side. */
498 return rcu_dereference(priv->peer);
499 }
500
501 static int veth_xdp_xmit(struct net_device *dev, int n,
502 struct xdp_frame **frames,
503 u32 flags, bool ndo_xmit)
504 {
505 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
506 int i, ret = -ENXIO, nxmit = 0;
507 struct net_device *rcv;
508 unsigned int max_len;
509 struct veth_rq *rq;
510
511 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
512 return -EINVAL;
513
514 rcu_read_lock();
515 rcv = rcu_dereference(priv->peer);
516 if (unlikely(!rcv))
517 goto out;
518
519 rcv_priv = netdev_priv(rcv);
520 rq = &rcv_priv->rq[veth_select_rxq(rcv)];
521 /* The napi pointer is set if NAPI is enabled, which ensures that
522 * xdp_ring is initialized on receive side and the peer device is up.
523 */
524 if (!rcu_access_pointer(rq->napi))
525 goto out;
526
527 max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
528
529 spin_lock(&rq->xdp_ring.producer_lock);
530 for (i = 0; i < n; i++) {
531 struct xdp_frame *frame = frames[i];
532 void *ptr = veth_xdp_to_ptr(frame);
533
534 if (unlikely(xdp_get_frame_len(frame) > max_len ||
535 __ptr_ring_produce(&rq->xdp_ring, ptr)))
536 break;
537 nxmit++;
538 }
539 spin_unlock(&rq->xdp_ring.producer_lock);
540
541 if (flags & XDP_XMIT_FLUSH)
542 __veth_xdp_flush(rq);
543
544 ret = nxmit;
545 if (ndo_xmit) {
546 u64_stats_update_begin(&rq->stats.syncp);
547 rq->stats.vs.peer_tq_xdp_xmit += nxmit;
548 rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
549 u64_stats_update_end(&rq->stats.syncp);
550 }
551
552 out:
553 rcu_read_unlock();
554
555 return ret;
556 }
557
558 static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
559 struct xdp_frame **frames, u32 flags)
560 {
561 int err;
562
563 err = veth_xdp_xmit(dev, n, frames, flags, true);
564 if (err < 0) {
565 struct veth_priv *priv = netdev_priv(dev);
566
567 atomic64_add(n, &priv->dropped);
568 }
569
570 return err;
571 }
572
573 static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
574 {
575 int sent, i, err = 0, drops;
576
577 sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
578 if (sent < 0) {
579 err = sent;
580 sent = 0;
581 }
582
583 for (i = sent; unlikely(i < bq->count); i++)
584 xdp_return_frame(bq->q[i]);
585
586 drops = bq->count - sent;
587 trace_xdp_bulk_tx(rq->dev, sent, drops, err);
588
589 u64_stats_update_begin(&rq->stats.syncp);
590 rq->stats.vs.xdp_tx += sent;
591 rq->stats.vs.xdp_tx_err += drops;
592 u64_stats_update_end(&rq->stats.syncp);
593
594 bq->count = 0;
595 }
596
597 static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
598 {
599 struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
600 struct net_device *rcv;
601 struct veth_rq *rcv_rq;
602
603 rcu_read_lock();
604 veth_xdp_flush_bq(rq, bq);
605 rcv = rcu_dereference(priv->peer);
606 if (unlikely(!rcv))
607 goto out;
608
609 rcv_priv = netdev_priv(rcv);
610 rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
611 /* xdp_ring is initialized on receive side? */
612 if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
613 goto out;
614
615 __veth_xdp_flush(rcv_rq);
616 out:
617 rcu_read_unlock();
618 }
619
620 static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
621 struct veth_xdp_tx_bq *bq)
622 {
623 struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
624
625 if (unlikely(!frame))
626 return -EOVERFLOW;
627
628 if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
629 veth_xdp_flush_bq(rq, bq);
630
631 bq->q[bq->count++] = frame;
632
633 return 0;
634 }
635
636 static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
637 struct xdp_frame *frame,
638 struct veth_xdp_tx_bq *bq,
639 struct veth_stats *stats)
640 {
641 struct xdp_frame orig_frame;
642 struct bpf_prog *xdp_prog;
643
644 rcu_read_lock();
645 xdp_prog = rcu_dereference(rq->xdp_prog);
646 if (likely(xdp_prog)) {
647 struct veth_xdp_buff vxbuf;
648 struct xdp_buff *xdp = &vxbuf.xdp;
649 u32 act;
650
651 xdp_convert_frame_to_buff(frame, xdp);
652 xdp->rxq = &rq->xdp_rxq;
653 vxbuf.skb = NULL;
654
655 act = bpf_prog_run_xdp(xdp_prog, xdp);
656
657 switch (act) {
658 case XDP_PASS:
659 if (xdp_update_frame_from_buff(xdp, frame))
660 goto err_xdp;
661 break;
662 case XDP_TX:
663 orig_frame = *frame;
664 xdp->rxq->mem.type = frame->mem_type;
665 if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
666 trace_xdp_exception(rq->dev, xdp_prog, act);
667 frame = &orig_frame;
668 stats->rx_drops++;
669 goto err_xdp;
670 }
671 stats->xdp_tx++;
672 rcu_read_unlock();
673 goto xdp_xmit;
674 case XDP_REDIRECT:
675 orig_frame = *frame;
676 xdp->rxq->mem.type = frame->mem_type;
677 if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
678 frame = &orig_frame;
679 stats->rx_drops++;
680 goto err_xdp;
681 }
682 stats->xdp_redirect++;
683 rcu_read_unlock();
684 goto xdp_xmit;
685 default:
686 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
687 fallthrough;
688 case XDP_ABORTED:
689 trace_xdp_exception(rq->dev, xdp_prog, act);
690 fallthrough;
691 case XDP_DROP:
692 stats->xdp_drops++;
693 goto err_xdp;
694 }
695 }
696 rcu_read_unlock();
697
698 return frame;
699 err_xdp:
700 rcu_read_unlock();
701 xdp_return_frame(frame);
702 xdp_xmit:
703 return NULL;
704 }
705
706 /* the frames array contains at most VETH_XDP_BATCH frames */
707 static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
708 int n_xdpf, struct veth_xdp_tx_bq *bq,
709 struct veth_stats *stats)
710 {
711 void *skbs[VETH_XDP_BATCH];
712 int i;
713
714 if (unlikely(!napi_skb_cache_get_bulk(skbs, n_xdpf))) {
715 for (i = 0; i < n_xdpf; i++)
716 xdp_return_frame(frames[i]);
717 stats->rx_drops += n_xdpf;
718
719 return;
720 }
721
722 for (i = 0; i < n_xdpf; i++) {
723 struct sk_buff *skb = skbs[i];
724
725 skb = __xdp_build_skb_from_frame(frames[i], skb,
726 rq->dev);
727 if (!skb) {
728 xdp_return_frame(frames[i]);
729 stats->rx_drops++;
730 continue;
731 }
732 napi_gro_receive(&rq->xdp_napi, skb);
733 }
734 }
735
736 static void veth_xdp_get(struct xdp_buff *xdp)
737 {
738 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
739 int i;
740
741 get_page(virt_to_page(xdp->data));
742 if (likely(!xdp_buff_has_frags(xdp)))
743 return;
744
745 for (i = 0; i < sinfo->nr_frags; i++)
746 __skb_frag_ref(&sinfo->frags[i]);
747 }
748
749 static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
750 struct xdp_buff *xdp,
751 struct sk_buff **pskb)
752 {
753 struct sk_buff *skb = *pskb;
754 u32 frame_sz;
755
756 if (skb_shared(skb) || skb_head_is_locked(skb) ||
757 skb_shinfo(skb)->nr_frags ||
758 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
759 if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM))
760 goto drop;
761
762 skb = *pskb;
763 }
764
765 /* SKB "head" area always have tailroom for skb_shared_info */
766 frame_sz = skb_end_pointer(skb) - skb->head;
767 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
768 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
769 xdp_prepare_buff(xdp, skb->head, skb_headroom(skb),
770 skb_headlen(skb), true);
771
772 if (skb_is_nonlinear(skb)) {
773 skb_shinfo(skb)->xdp_frags_size = skb->data_len;
774 xdp_buff_set_frags_flag(xdp);
775 } else {
776 xdp_buff_clear_frags_flag(xdp);
777 }
778 *pskb = skb;
779
780 return 0;
781 drop:
782 consume_skb(skb);
783 *pskb = NULL;
784
785 return -ENOMEM;
786 }
787
788 static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
789 struct sk_buff *skb,
790 struct veth_xdp_tx_bq *bq,
791 struct veth_stats *stats)
792 {
793 void *orig_data, *orig_data_end;
794 struct bpf_prog *xdp_prog;
795 struct veth_xdp_buff vxbuf;
796 struct xdp_buff *xdp = &vxbuf.xdp;
797 u32 act, metalen;
798 int off;
799
800 skb_prepare_for_gro(skb);
801
802 rcu_read_lock();
803 xdp_prog = rcu_dereference(rq->xdp_prog);
804 if (unlikely(!xdp_prog)) {
805 rcu_read_unlock();
806 goto out;
807 }
808
809 __skb_push(skb, skb->data - skb_mac_header(skb));
810 if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
811 goto drop;
812 vxbuf.skb = skb;
813
814 orig_data = xdp->data;
815 orig_data_end = xdp->data_end;
816
817 act = bpf_prog_run_xdp(xdp_prog, xdp);
818
819 switch (act) {
820 case XDP_PASS:
821 break;
822 case XDP_TX:
823 veth_xdp_get(xdp);
824 consume_skb(skb);
825 xdp->rxq->mem = rq->xdp_mem;
826 if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
827 trace_xdp_exception(rq->dev, xdp_prog, act);
828 stats->rx_drops++;
829 goto err_xdp;
830 }
831 stats->xdp_tx++;
832 rcu_read_unlock();
833 goto xdp_xmit;
834 case XDP_REDIRECT:
835 veth_xdp_get(xdp);
836 consume_skb(skb);
837 xdp->rxq->mem = rq->xdp_mem;
838 if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
839 stats->rx_drops++;
840 goto err_xdp;
841 }
842 stats->xdp_redirect++;
843 rcu_read_unlock();
844 goto xdp_xmit;
845 default:
846 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
847 fallthrough;
848 case XDP_ABORTED:
849 trace_xdp_exception(rq->dev, xdp_prog, act);
850 fallthrough;
851 case XDP_DROP:
852 stats->xdp_drops++;
853 goto xdp_drop;
854 }
855 rcu_read_unlock();
856
857 /* check if bpf_xdp_adjust_head was used */
858 off = orig_data - xdp->data;
859 if (off > 0)
860 __skb_push(skb, off);
861 else if (off < 0)
862 __skb_pull(skb, -off);
863
864 skb_reset_mac_header(skb);
865
866 /* check if bpf_xdp_adjust_tail was used */
867 off = xdp->data_end - orig_data_end;
868 if (off != 0)
869 __skb_put(skb, off); /* positive on grow, negative on shrink */
870
871 /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
872 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
873 */
874 if (xdp_buff_has_frags(xdp))
875 skb->data_len = skb_shinfo(skb)->xdp_frags_size;
876 else
877 skb->data_len = 0;
878
879 skb->protocol = eth_type_trans(skb, rq->dev);
880
881 metalen = xdp->data - xdp->data_meta;
882 if (metalen)
883 skb_metadata_set(skb, metalen);
884 out:
885 return skb;
886 drop:
887 stats->rx_drops++;
888 xdp_drop:
889 rcu_read_unlock();
890 kfree_skb(skb);
891 return NULL;
892 err_xdp:
893 rcu_read_unlock();
894 xdp_return_buff(xdp);
895 xdp_xmit:
896 return NULL;
897 }
898
899 static int veth_xdp_rcv(struct veth_rq *rq, int budget,
900 struct veth_xdp_tx_bq *bq,
901 struct veth_stats *stats)
902 {
903 struct veth_priv *priv = netdev_priv(rq->dev);
904 int queue_idx = rq->xdp_rxq.queue_index;
905 struct netdev_queue *peer_txq;
906 struct net_device *peer_dev;
907 int i, done = 0, n_xdpf = 0;
908 void *xdpf[VETH_XDP_BATCH];
909
910 /* NAPI functions as RCU section */
911 peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
912 peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
913
914 for (i = 0; i < budget; i++) {
915 void *ptr = __ptr_ring_consume(&rq->xdp_ring);
916
917 if (!ptr)
918 break;
919
920 if (veth_is_xdp_frame(ptr)) {
921 /* ndo_xdp_xmit */
922 struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
923
924 stats->xdp_bytes += xdp_get_frame_len(frame);
925 frame = veth_xdp_rcv_one(rq, frame, bq, stats);
926 if (frame) {
927 /* XDP_PASS */
928 xdpf[n_xdpf++] = frame;
929 if (n_xdpf == VETH_XDP_BATCH) {
930 veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
931 bq, stats);
932 n_xdpf = 0;
933 }
934 }
935 } else {
936 /* ndo_start_xmit */
937 struct sk_buff *skb = ptr;
938
939 stats->xdp_bytes += skb->len;
940 skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
941 if (skb) {
942 if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
943 netif_receive_skb(skb);
944 else
945 napi_gro_receive(&rq->xdp_napi, skb);
946 }
947 }
948 done++;
949 }
950
951 if (n_xdpf)
952 veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);
953
954 u64_stats_update_begin(&rq->stats.syncp);
955 rq->stats.vs.xdp_redirect += stats->xdp_redirect;
956 rq->stats.vs.xdp_bytes += stats->xdp_bytes;
957 rq->stats.vs.xdp_drops += stats->xdp_drops;
958 rq->stats.vs.rx_drops += stats->rx_drops;
959 rq->stats.vs.xdp_packets += done;
960 u64_stats_update_end(&rq->stats.syncp);
961
962 if (peer_txq && unlikely(netif_tx_queue_stopped(peer_txq)))
963 netif_tx_wake_queue(peer_txq);
964
965 return done;
966 }
967
968 static int veth_poll(struct napi_struct *napi, int budget)
969 {
970 struct veth_rq *rq =
971 container_of(napi, struct veth_rq, xdp_napi);
972 struct veth_stats stats = {};
973 struct veth_xdp_tx_bq bq;
974 int done;
975
976 bq.count = 0;
977
978 xdp_set_return_frame_no_direct();
979 done = veth_xdp_rcv(rq, budget, &bq, &stats);
980
981 if (stats.xdp_redirect > 0)
982 xdp_do_flush();
983
984 if (done < budget && napi_complete_done(napi, done)) {
985 /* Write rx_notify_masked before reading ptr_ring */
986 smp_store_mb(rq->rx_notify_masked, false);
987 if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
988 if (napi_schedule_prep(&rq->xdp_napi)) {
989 WRITE_ONCE(rq->rx_notify_masked, true);
990 __napi_schedule(&rq->xdp_napi);
991 }
992 }
993 }
994
995 if (stats.xdp_tx > 0)
996 veth_xdp_flush(rq, &bq);
997 xdp_clear_return_frame_no_direct();
998
999 return done;
1000 }
1001
1002 static int veth_create_page_pool(struct veth_rq *rq)
1003 {
1004 struct page_pool_params pp_params = {
1005 .order = 0,
1006 .pool_size = VETH_RING_SIZE,
1007 .nid = NUMA_NO_NODE,
1008 .dev = &rq->dev->dev,
1009 };
1010
1011 rq->page_pool = page_pool_create(&pp_params);
1012 if (IS_ERR(rq->page_pool)) {
1013 int err = PTR_ERR(rq->page_pool);
1014
1015 rq->page_pool = NULL;
1016 return err;
1017 }
1018
1019 return 0;
1020 }
1021
1022 static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
1023 {
1024 struct veth_priv *priv = netdev_priv(dev);
1025 int err, i;
1026
1027 for (i = start; i < end; i++) {
1028 err = veth_create_page_pool(&priv->rq[i]);
1029 if (err)
1030 goto err_page_pool;
1031 }
1032
1033 for (i = start; i < end; i++) {
1034 struct veth_rq *rq = &priv->rq[i];
1035
1036 err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
1037 if (err)
1038 goto err_xdp_ring;
1039 }
1040
1041 for (i = start; i < end; i++) {
1042 struct veth_rq *rq = &priv->rq[i];
1043
1044 napi_enable(&rq->xdp_napi);
1045 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
1046 }
1047
1048 return 0;
1049
1050 err_xdp_ring:
1051 for (i--; i >= start; i--)
1052 ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
1053 i = end;
1054 err_page_pool:
1055 for (i--; i >= start; i--) {
1056 page_pool_destroy(priv->rq[i].page_pool);
1057 priv->rq[i].page_pool = NULL;
1058 }
1059
1060 return err;
1061 }
1062
1063 static int __veth_napi_enable(struct net_device *dev)
1064 {
1065 return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1066 }
1067
1068 static void veth_napi_del_range(struct net_device *dev, int start, int end)
1069 {
1070 struct veth_priv *priv = netdev_priv(dev);
1071 int i;
1072
1073 for (i = start; i < end; i++) {
1074 struct veth_rq *rq = &priv->rq[i];
1075
1076 rcu_assign_pointer(priv->rq[i].napi, NULL);
1077 napi_disable(&rq->xdp_napi);
1078 __netif_napi_del(&rq->xdp_napi);
1079 }
1080 synchronize_net();
1081
1082 for (i = start; i < end; i++) {
1083 struct veth_rq *rq = &priv->rq[i];
1084
1085 rq->rx_notify_masked = false;
1086 ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
1087 }
1088
1089 for (i = start; i < end; i++) {
1090 page_pool_destroy(priv->rq[i].page_pool);
1091 priv->rq[i].page_pool = NULL;
1092 }
1093 }
1094
1095 static void veth_napi_del(struct net_device *dev)
1096 {
1097 veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
1098 }
1099
1100 static bool veth_gro_requested(const struct net_device *dev)
1101 {
1102 return !!(dev->wanted_features & NETIF_F_GRO);
1103 }
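/* GRO is what creates standalone NAPI instances for veth (see
 * veth_set_features()); from userspace it is toggled with e.g.
 * `ethtool -K veth0 gro on` (hypothetical interface name).
 */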
1104
1105 static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
1106 bool napi_already_on)
1107 {
1108 struct veth_priv *priv = netdev_priv(dev);
1109 int err, i;
1110
1111 for (i = start; i < end; i++) {
1112 struct veth_rq *rq = &priv->rq[i];
1113
1114 if (!napi_already_on)
1115 netif_napi_add(dev, &rq->xdp_napi, veth_poll);
1116 err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
1117 if (err < 0)
1118 goto err_rxq_reg;
1119
1120 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
1121 MEM_TYPE_PAGE_SHARED,
1122 NULL);
1123 if (err < 0)
1124 goto err_reg_mem;
1125
1126 /* Save original mem info as it can be overwritten */
1127 rq->xdp_mem = rq->xdp_rxq.mem;
1128 }
1129 return 0;
1130
1131 err_reg_mem:
1132 xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
1133 err_rxq_reg:
1134 for (i--; i >= start; i--) {
1135 struct veth_rq *rq = &priv->rq[i];
1136
1137 xdp_rxq_info_unreg(&rq->xdp_rxq);
1138 if (!napi_already_on)
1139 netif_napi_del(&rq->xdp_napi);
1140 }
1141
1142 return err;
1143 }
1144
1145 static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
1146 bool delete_napi)
1147 {
1148 struct veth_priv *priv = netdev_priv(dev);
1149 int i;
1150
1151 for (i = start; i < end; i++) {
1152 struct veth_rq *rq = &priv->rq[i];
1153
1154 rq->xdp_rxq.mem = rq->xdp_mem;
1155 xdp_rxq_info_unreg(&rq->xdp_rxq);
1156
1157 if (delete_napi)
1158 netif_napi_del(&rq->xdp_napi);
1159 }
1160 }
1161
1162 static int veth_enable_xdp(struct net_device *dev)
1163 {
1164 bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
1165 struct veth_priv *priv = netdev_priv(dev);
1166 int err, i;
1167
1168 if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
1169 err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
1170 if (err)
1171 return err;
1172
1173 if (!napi_already_on) {
1174 err = __veth_napi_enable(dev);
1175 if (err) {
1176 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
1177 return err;
1178 }
1179 }
1180 }
1181
1182 for (i = 0; i < dev->real_num_rx_queues; i++) {
1183 rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
1184 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
1185 }
1186
1187 return 0;
1188 }
1189
1190 static void veth_disable_xdp(struct net_device *dev)
1191 {
1192 struct veth_priv *priv = netdev_priv(dev);
1193 int i;
1194
1195 for (i = 0; i < dev->real_num_rx_queues; i++)
1196 rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
1197
1198 if (!netif_running(dev) || !veth_gro_requested(dev))
1199 veth_napi_del(dev);
1200
1201 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
1202 }
1203
1204 static int veth_napi_enable_range(struct net_device *dev, int start, int end)
1205 {
1206 struct veth_priv *priv = netdev_priv(dev);
1207 int err, i;
1208
1209 for (i = start; i < end; i++) {
1210 struct veth_rq *rq = &priv->rq[i];
1211
1212 netif_napi_add(dev, &rq->xdp_napi, veth_poll);
1213 }
1214
1215 err = __veth_napi_enable_range(dev, start, end);
1216 if (err) {
1217 for (i = start; i < end; i++) {
1218 struct veth_rq *rq = &priv->rq[i];
1219
1220 netif_napi_del(&rq->xdp_napi);
1221 }
1222 return err;
1223 }
1224 return err;
1225 }
1226
1227 static int veth_napi_enable(struct net_device *dev)
1228 {
1229 return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1230 }
1231
1232 static void veth_disable_range_safe(struct net_device *dev, int start, int end)
1233 {
1234 struct veth_priv *priv = netdev_priv(dev);
1235
1236 if (start >= end)
1237 return;
1238
1239 if (priv->_xdp_prog) {
1240 veth_napi_del_range(dev, start, end);
1241 veth_disable_xdp_range(dev, start, end, false);
1242 } else if (veth_gro_requested(dev)) {
1243 veth_napi_del_range(dev, start, end);
1244 }
1245 }
1246
1247 static int veth_enable_range_safe(struct net_device *dev, int start, int end)
1248 {
1249 struct veth_priv *priv = netdev_priv(dev);
1250 int err;
1251
1252 if (start >= end)
1253 return 0;
1254
1255 if (priv->_xdp_prog) {
1256 /* these channels are freshly initialized, napi is not on there even
1257 * when GRO is requested
1258 */
1259 err = veth_enable_xdp_range(dev, start, end, false);
1260 if (err)
1261 return err;
1262
1263 err = __veth_napi_enable_range(dev, start, end);
1264 if (err) {
1265 /* on error always delete the newly added napis */
1266 veth_disable_xdp_range(dev, start, end, true);
1267 return err;
1268 }
1269 } else if (veth_gro_requested(dev)) {
1270 return veth_napi_enable_range(dev, start, end);
1271 }
1272 return 0;
1273 }
1274
1275 static void veth_set_xdp_features(struct net_device *dev)
1276 {
1277 struct veth_priv *priv = netdev_priv(dev);
1278 struct net_device *peer;
1279
1280 peer = rtnl_dereference(priv->peer);
1281 if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
1282 struct veth_priv *priv_peer = netdev_priv(peer);
1283 xdp_features_t val = NETDEV_XDP_ACT_BASIC |
1284 NETDEV_XDP_ACT_REDIRECT |
1285 NETDEV_XDP_ACT_RX_SG;
1286
1287 if (priv_peer->_xdp_prog || veth_gro_requested(peer))
1288 val |= NETDEV_XDP_ACT_NDO_XMIT |
1289 NETDEV_XDP_ACT_NDO_XMIT_SG;
1290 xdp_set_features_flag(dev, val);
1291 } else {
1292 xdp_clear_features_flag(dev);
1293 }
1294 }
1295
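/* Channel counts can be reconfigured from userspace, e.g. with
 * `ethtool -L veth0 rx 4 tx 4` (hypothetical values). The checks below
 * preserve the XDP invariant that a device with a program attached has
 * at least as many rx queues as its peer has tx queues.
 */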
1296 static int veth_set_channels(struct net_device *dev,
1297 struct ethtool_channels *ch)
1298 {
1299 struct veth_priv *priv = netdev_priv(dev);
1300 unsigned int old_rx_count, new_rx_count;
1301 struct veth_priv *peer_priv;
1302 struct net_device *peer;
1303 int err;
1304
1305 /* sanity check. Upper bounds are already enforced by the caller */
1306 if (!ch->rx_count || !ch->tx_count)
1307 return -EINVAL;
1308
1309 /* avoid breaking XDP, if that is enabled */
1310 peer = rtnl_dereference(priv->peer);
1311 peer_priv = peer ? netdev_priv(peer) : NULL;
1312 if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
1313 return -EINVAL;
1314
1315 if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
1316 return -EINVAL;
1317
1318 old_rx_count = dev->real_num_rx_queues;
1319 new_rx_count = ch->rx_count;
1320 if (netif_running(dev)) {
1321 /* turn device off */
1322 netif_carrier_off(dev);
1323 if (peer)
1324 netif_carrier_off(peer);
1325
1326 /* try to allocate new resources, as needed */
1327 err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
1328 if (err)
1329 goto out;
1330 }
1331
1332 err = netif_set_real_num_rx_queues(dev, ch->rx_count);
1333 if (err)
1334 goto revert;
1335
1336 err = netif_set_real_num_tx_queues(dev, ch->tx_count);
1337 if (err) {
1338 int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);
1339
1340 /* this error condition could happen only if rx and tx change
1341 * in opposite directions (e.g. tx nr raises, rx nr decreases)
1342 * and we can't do anything to fully restore the original
1343 * status
1344 */
1345 if (err2)
1346 pr_warn("Can't restore rx queues config %d -> %d %d",
1347 new_rx_count, old_rx_count, err2);
1348 else
1349 goto revert;
1350 }
1351
1352 out:
1353 if (netif_running(dev)) {
1354 /* note that we need to swap the arguments WRT the enable part
1355 * to identify the range we have to disable
1356 */
1357 veth_disable_range_safe(dev, new_rx_count, old_rx_count);
1358 netif_carrier_on(dev);
1359 if (peer)
1360 netif_carrier_on(peer);
1361 }
1362
1363 /* update XDP supported features */
1364 veth_set_xdp_features(dev);
1365 if (peer)
1366 veth_set_xdp_features(peer);
1367
1368 return err;
1369
1370 revert:
1371 new_rx_count = old_rx_count;
1372 old_rx_count = ch->rx_count;
1373 goto out;
1374 }
1375
1376 static int veth_open(struct net_device *dev)
1377 {
1378 struct veth_priv *priv = netdev_priv(dev);
1379 struct net_device *peer = rtnl_dereference(priv->peer);
1380 int err;
1381
1382 if (!peer)
1383 return -ENOTCONN;
1384
1385 if (priv->_xdp_prog) {
1386 err = veth_enable_xdp(dev);
1387 if (err)
1388 return err;
1389 } else if (veth_gro_requested(dev)) {
1390 err = veth_napi_enable(dev);
1391 if (err)
1392 return err;
1393 }
1394
1395 if (peer->flags & IFF_UP) {
1396 netif_carrier_on(dev);
1397 netif_carrier_on(peer);
1398 }
1399
1400 veth_set_xdp_features(dev);
1401
1402 return 0;
1403 }
1404
1405 static int veth_close(struct net_device *dev)
1406 {
1407 struct veth_priv *priv = netdev_priv(dev);
1408 struct net_device *peer = rtnl_dereference(priv->peer);
1409
1410 netif_carrier_off(dev);
1411 if (peer)
1412 netif_carrier_off(peer);
1413
1414 if (priv->_xdp_prog)
1415 veth_disable_xdp(dev);
1416 else if (veth_gro_requested(dev))
1417 veth_napi_del(dev);
1418
1419 return 0;
1420 }
1421
1422 static int is_valid_veth_mtu(int mtu)
1423 {
1424 return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
1425 }
1426
1427 static int veth_alloc_queues(struct net_device *dev)
1428 {
1429 struct veth_priv *priv = netdev_priv(dev);
1430 int i;
1431
1432 priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
1433 GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
1434 if (!priv->rq)
1435 return -ENOMEM;
1436
1437 for (i = 0; i < dev->num_rx_queues; i++) {
1438 priv->rq[i].dev = dev;
1439 u64_stats_init(&priv->rq[i].stats.syncp);
1440 }
1441
1442 return 0;
1443 }
1444
1445 static void veth_free_queues(struct net_device *dev)
1446 {
1447 struct veth_priv *priv = netdev_priv(dev);
1448
1449 kvfree(priv->rq);
1450 }
1451
1452 static int veth_dev_init(struct net_device *dev)
1453 {
1454 netdev_lockdep_set_classes(dev);
1455 return veth_alloc_queues(dev);
1456 }
1457
1458 static void veth_dev_free(struct net_device *dev)
1459 {
1460 veth_free_queues(dev);
1461 }
1462
1463 #ifdef CONFIG_NET_POLL_CONTROLLER
1464 static void veth_poll_controller(struct net_device *dev)
1465 {
1466 /* veth only receives frames when its peer sends one.
1467 * Since it has nothing to do with disabling irqs, we are guaranteed
1468 * never to have pending data when we poll for it, so
1469 * there is nothing to do here.
1470 *
1471 * We need this though so netpoll recognizes us as an interface that
1472 * supports polling, which enables bridge devices in virt setups to
1473 * still use netconsole.
1474 */
1475 }
1476 #endif /* CONFIG_NET_POLL_CONTROLLER */
1477
1478 static int veth_get_iflink(const struct net_device *dev)
1479 {
1480 struct veth_priv *priv = netdev_priv(dev);
1481 struct net_device *peer;
1482 int iflink;
1483
1484 rcu_read_lock();
1485 peer = rcu_dereference(priv->peer);
1486 iflink = peer ? READ_ONCE(peer->ifindex) : 0;
1487 rcu_read_unlock();
1488
1489 return iflink;
1490 }
1491
1492 static netdev_features_t veth_fix_features(struct net_device *dev,
1493 netdev_features_t features)
1494 {
1495 struct veth_priv *priv = netdev_priv(dev);
1496 struct net_device *peer;
1497
1498 peer = rtnl_dereference(priv->peer);
1499 if (peer) {
1500 struct veth_priv *peer_priv = netdev_priv(peer);
1501
1502 if (peer_priv->_xdp_prog)
1503 features &= ~NETIF_F_GSO_SOFTWARE;
1504 }
1505
1506 return features;
1507 }
1508
1509 static int veth_set_features(struct net_device *dev,
1510 netdev_features_t features)
1511 {
1512 netdev_features_t changed = features ^ dev->features;
1513 struct veth_priv *priv = netdev_priv(dev);
1514 struct net_device *peer;
1515 int err;
1516
1517 if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
1518 return 0;
1519
1520 peer = rtnl_dereference(priv->peer);
1521 if (features & NETIF_F_GRO) {
1522 err = veth_napi_enable(dev);
1523 if (err)
1524 return err;
1525
1526 if (peer)
1527 xdp_features_set_redirect_target(peer, true);
1528 } else {
1529 if (peer)
1530 xdp_features_clear_redirect_target(peer);
1531 veth_napi_del(dev);
1532 }
1533 return 0;
1534 }
1535
1536 static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
1537 {
1538 struct veth_priv *peer_priv, *priv = netdev_priv(dev);
1539 struct net_device *peer;
1540
1541 if (new_hr < 0)
1542 new_hr = 0;
1543
1544 rcu_read_lock();
1545 peer = rcu_dereference(priv->peer);
1546 if (unlikely(!peer))
1547 goto out;
1548
1549 peer_priv = netdev_priv(peer);
1550 priv->requested_headroom = new_hr;
1551 new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
1552 dev->needed_headroom = new_hr;
1553 peer->needed_headroom = new_hr;
1554
1555 out:
1556 rcu_read_unlock();
1557 }
1558
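/* Reached via veth_xdp()/XDP_SETUP_PROG, e.g. from iproute2 with
 * `ip link set dev veth0 xdp obj xdp_prog.o sec xdp` (hypothetical
 * interface and object names).
 */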
1559 static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1560 struct netlink_ext_ack *extack)
1561 {
1562 struct veth_priv *priv = netdev_priv(dev);
1563 struct bpf_prog *old_prog;
1564 struct net_device *peer;
1565 unsigned int max_mtu;
1566 int err;
1567
1568 old_prog = priv->_xdp_prog;
1569 priv->_xdp_prog = prog;
1570 peer = rtnl_dereference(priv->peer);
1571
1572 if (prog) {
1573 if (!peer) {
1574 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1575 err = -ENOTCONN;
1576 goto err;
1577 }
1578
1579 max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
1580 peer->hard_header_len;
1581 /* Allow increasing the max_mtu if the program supports
1582 * XDP fragments.
1583 */
1584 if (prog->aux->xdp_has_frags)
1585 max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
1586
1587 if (peer->mtu > max_mtu) {
1588 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1589 err = -ERANGE;
1590 goto err;
1591 }
1592
1593 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1594 NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1595 err = -ENOSPC;
1596 goto err;
1597 }
1598
1599 if (dev->flags & IFF_UP) {
1600 err = veth_enable_xdp(dev);
1601 if (err) {
1602 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1603 goto err;
1604 }
1605 }
1606
1607 if (!old_prog) {
1608 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1609 peer->max_mtu = max_mtu;
1610 }
1611
1612 xdp_features_set_redirect_target(peer, true);
1613 }
1614
1615 if (old_prog) {
1616 if (!prog) {
1617 if (peer && !veth_gro_requested(dev))
1618 xdp_features_clear_redirect_target(peer);
1619
1620 if (dev->flags & IFF_UP)
1621 veth_disable_xdp(dev);
1622
1623 if (peer) {
1624 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1625 peer->max_mtu = ETH_MAX_MTU;
1626 }
1627 }
1628 bpf_prog_put(old_prog);
1629 }
1630
1631 if ((!!old_prog ^ !!prog) && peer)
1632 netdev_update_features(peer);
1633
1634 return 0;
1635 err:
1636 priv->_xdp_prog = old_prog;
1637
1638 return err;
1639 }
1640
1641 static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1642 {
1643 switch (xdp->command) {
1644 case XDP_SETUP_PROG:
1645 return veth_xdp_set(dev, xdp->prog, xdp->extack);
1646 default:
1647 return -EINVAL;
1648 }
1649 }
1650
1651 static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
1652 {
1653 struct veth_xdp_buff *_ctx = (void *)ctx;
1654
1655 if (!_ctx->skb)
1656 return -ENODATA;
1657
1658 *timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
1659 return 0;
1660 }
1661
1662 static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
1663 enum xdp_rss_hash_type *rss_type)
1664 {
1665 struct veth_xdp_buff *_ctx = (void *)ctx;
1666 struct sk_buff *skb = _ctx->skb;
1667
1668 if (!skb)
1669 return -ENODATA;
1670
1671 *hash = skb_get_hash(skb);
1672 *rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE;
1673
1674 return 0;
1675 }
1676
1677 static int veth_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
1678 u16 *vlan_tci)
1679 {
1680 const struct veth_xdp_buff *_ctx = (void *)ctx;
1681 const struct sk_buff *skb = _ctx->skb;
1682 int err;
1683
1684 if (!skb)
1685 return -ENODATA;
1686
1687 err = __vlan_hwaccel_get_tag(skb, vlan_tci);
1688 if (err)
1689 return err;
1690
1691 *vlan_proto = skb->vlan_proto;
1692 return err;
1693 }
1694
1695 static const struct net_device_ops veth_netdev_ops = {
1696 .ndo_init = veth_dev_init,
1697 .ndo_open = veth_open,
1698 .ndo_stop = veth_close,
1699 .ndo_start_xmit = veth_xmit,
1700 .ndo_get_stats64 = veth_get_stats64,
1701 .ndo_set_rx_mode = veth_set_multicast_list,
1702 .ndo_set_mac_address = eth_mac_addr,
1703 #ifdef CONFIG_NET_POLL_CONTROLLER
1704 .ndo_poll_controller = veth_poll_controller,
1705 #endif
1706 .ndo_get_iflink = veth_get_iflink,
1707 .ndo_fix_features = veth_fix_features,
1708 .ndo_set_features = veth_set_features,
1709 .ndo_features_check = passthru_features_check,
1710 .ndo_set_rx_headroom = veth_set_rx_headroom,
1711 .ndo_bpf = veth_xdp,
1712 .ndo_xdp_xmit = veth_ndo_xdp_xmit,
1713 .ndo_get_peer_dev = veth_peer_dev,
1714 };
1715
1716 static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
1717 .xmo_rx_timestamp = veth_xdp_rx_timestamp,
1718 .xmo_rx_hash = veth_xdp_rx_hash,
1719 .xmo_rx_vlan_tag = veth_xdp_rx_vlan_tag,
1720 };
1721
1722 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
1723 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
1724 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
1725 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1726 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)
1727
1728 static void veth_setup(struct net_device *dev)
1729 {
1730 ether_setup(dev);
1731
1732 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1733 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1734 dev->priv_flags |= IFF_NO_QUEUE;
1735 dev->priv_flags |= IFF_PHONY_HEADROOM;
1736 dev->priv_flags |= IFF_DISABLE_NETPOLL;
1737 dev->lltx = true;
1738
1739 dev->netdev_ops = &veth_netdev_ops;
1740 dev->xdp_metadata_ops = &veth_xdp_metadata_ops;
1741 dev->ethtool_ops = &veth_ethtool_ops;
1742 dev->features |= VETH_FEATURES;
1743 dev->vlan_features = dev->features &
1744 ~(NETIF_F_HW_VLAN_CTAG_TX |
1745 NETIF_F_HW_VLAN_STAG_TX |
1746 NETIF_F_HW_VLAN_CTAG_RX |
1747 NETIF_F_HW_VLAN_STAG_RX);
1748 dev->needs_free_netdev = true;
1749 dev->priv_destructor = veth_dev_free;
1750 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1751 dev->max_mtu = ETH_MAX_MTU;
1752
1753 dev->hw_features = VETH_FEATURES;
1754 dev->hw_enc_features = VETH_FEATURES;
1755 dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
1756 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
1757 }
1758
1759 /*
1760 * netlink interface
1761 */
1762
1763 static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
1764 struct netlink_ext_ack *extack)
1765 {
1766 if (tb[IFLA_ADDRESS]) {
1767 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1768 return -EINVAL;
1769 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1770 return -EADDRNOTAVAIL;
1771 }
1772 if (tb[IFLA_MTU]) {
1773 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
1774 return -EINVAL;
1775 }
1776 return 0;
1777 }
1778
1779 static struct rtnl_link_ops veth_link_ops;
1780
1781 static void veth_disable_gro(struct net_device *dev)
1782 {
1783 dev->features &= ~NETIF_F_GRO;
1784 dev->wanted_features &= ~NETIF_F_GRO;
1785 netdev_update_features(dev);
1786 }
1787
1788 static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
1789 {
1790 int err;
1791
1792 if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
1793 err = netif_set_real_num_tx_queues(dev, 1);
1794 if (err)
1795 return err;
1796 }
1797 if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
1798 err = netif_set_real_num_rx_queues(dev, 1);
1799 if (err)
1800 return err;
1801 }
1802 return 0;
1803 }
1804
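/* veth devices are created in pairs over netlink; the iproute2
 * equivalent is `ip link add veth0 type veth peer name veth1`
 * (hypothetical names). Peer attributes arrive in data[VETH_INFO_PEER]
 * and are parsed below; the peer is created and registered first,
 * then dev.
 */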
1805 static int veth_newlink(struct net_device *dev,
1806 struct rtnl_newlink_params *params,
1807 struct netlink_ext_ack *extack)
1808 {
1809 struct net *peer_net = rtnl_newlink_peer_net(params);
1810 struct nlattr **data = params->data;
1811 struct nlattr **tb = params->tb;
1812 int err;
1813 struct net_device *peer;
1814 struct veth_priv *priv;
1815 char ifname[IFNAMSIZ];
1816 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
1817 unsigned char name_assign_type;
1818 struct ifinfomsg *ifmp;
1819
1820 /*
1821 * create and register peer first
1822 */
1823 if (data && data[VETH_INFO_PEER]) {
1824 struct nlattr *nla_peer = data[VETH_INFO_PEER];
1825
1826 ifmp = nla_data(nla_peer);
1827 rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
1828 tbp = peer_tb;
1829 } else {
1830 ifmp = NULL;
1831 tbp = tb;
1832 }
1833
1834 if (ifmp && tbp[IFLA_IFNAME]) {
1835 nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
1836 name_assign_type = NET_NAME_USER;
1837 } else {
1838 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
1839 name_assign_type = NET_NAME_ENUM;
1840 }
1841
1842 peer = rtnl_create_link(peer_net, ifname, name_assign_type,
1843 &veth_link_ops, tbp, extack);
1844 if (IS_ERR(peer))
1845 return PTR_ERR(peer);
1846
1847 if (!ifmp || !tbp[IFLA_ADDRESS])
1848 eth_hw_addr_random(peer);
1849
1850 if (ifmp && (dev->ifindex != 0))
1851 peer->ifindex = ifmp->ifi_index;
1852
1853 netif_inherit_tso_max(peer, dev);
1854
1855 err = register_netdevice(peer);
1856 if (err < 0)
1857 goto err_register_peer;
1858
1859 /* keep GRO disabled by default to be consistent with the established
1860 * veth behavior
1861 */
1862 veth_disable_gro(peer);
1863 netif_carrier_off(peer);
1864
1865 err = rtnl_configure_link(peer, ifmp, 0, NULL);
1866 if (err < 0)
1867 goto err_configure_peer;
1868
1869 /*
1870 * register dev last
1871 *
1872 * note that, since we've registered a new device, the dev's name
1873 * should be re-allocated
1874 */
1875
1876 if (tb[IFLA_ADDRESS] == NULL)
1877 eth_hw_addr_random(dev);
1878
1879 if (tb[IFLA_IFNAME])
1880 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
1881 else
1882 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
1883
1884 err = register_netdevice(dev);
1885 if (err < 0)
1886 goto err_register_dev;
1887
1888 netif_carrier_off(dev);
1889
1890 /*
1891 * tie the devices together
1892 */
1893
1894 priv = netdev_priv(dev);
1895 rcu_assign_pointer(priv->peer, peer);
1896 err = veth_init_queues(dev, tb);
1897 if (err)
1898 goto err_queues;
1899
1900 priv = netdev_priv(peer);
1901 rcu_assign_pointer(priv->peer, dev);
1902 err = veth_init_queues(peer, tb);
1903 if (err)
1904 goto err_queues;
1905
1906 veth_disable_gro(dev);
1907 /* update XDP supported features */
1908 veth_set_xdp_features(dev);
1909 veth_set_xdp_features(peer);
1910
1911 return 0;
1912
1913 err_queues:
1914 unregister_netdevice(dev);
1915 err_register_dev:
1916 /* nothing to do */
1917 err_configure_peer:
1918 unregister_netdevice(peer);
1919 return err;
1920
1921 err_register_peer:
1922 free_netdev(peer);
1923 return err;
1924 }
1925
1926 static void veth_dellink(struct net_device *dev, struct list_head *head)
1927 {
1928 struct veth_priv *priv;
1929 struct net_device *peer;
1930
1931 priv = netdev_priv(dev);
1932 peer = rtnl_dereference(priv->peer);
1933
1934 /* Note : dellink() is called from default_device_exit_batch(),
1935 * before a rcu_synchronize() point. The devices are guaranteed
1936 * not being freed before one RCU grace period.
1937 */
1938 RCU_INIT_POINTER(priv->peer, NULL);
1939 unregister_netdevice_queue(dev, head);
1940
1941 if (peer) {
1942 priv = netdev_priv(peer);
1943 RCU_INIT_POINTER(priv->peer, NULL);
1944 unregister_netdevice_queue(peer, head);
1945 }
1946 }
1947
1948 static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1949 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
1950 };
1951
1952 static struct net *veth_get_link_net(const struct net_device *dev)
1953 {
1954 struct veth_priv *priv = netdev_priv(dev);
1955 struct net_device *peer = rtnl_dereference(priv->peer);
1956
1957 return peer ? dev_net(peer) : dev_net(dev);
1958 }
1959
1960 static unsigned int veth_get_num_queues(void)
1961 {
1962 /* enforce the same queue limit as rtnl_create_link */
1963 int queues = num_possible_cpus();
1964
1965 if (queues > 4096)
1966 queues = 4096;
1967 return queues;
1968 }
1969
1970 static struct rtnl_link_ops veth_link_ops = {
1971 .kind = DRV_NAME,
1972 .priv_size = sizeof(struct veth_priv),
1973 .setup = veth_setup,
1974 .validate = veth_validate,
1975 .newlink = veth_newlink,
1976 .dellink = veth_dellink,
1977 .policy = veth_policy,
1978 .peer_type = VETH_INFO_PEER,
1979 .maxtype = VETH_INFO_MAX,
1980 .get_link_net = veth_get_link_net,
1981 .get_num_tx_queues = veth_get_num_queues,
1982 .get_num_rx_queues = veth_get_num_queues,
1983 };
1984
1985 /*
1986 * init/fini
1987 */
1988
1989 static __init int veth_init(void)
1990 {
1991 return rtnl_link_register(&veth_link_ops);
1992 }
1993
1994 static __exit void veth_exit(void)
1995 {
1996 rtnl_link_unregister(&veth_link_ops);
1997 }
1998
1999 module_init(veth_init);
2000 module_exit(veth_exit);
2001
2002 MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
2003 MODULE_LICENSE("GPL v2");
2004 MODULE_ALIAS_RTNL_LINK(DRV_NAME);
2005