1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* A network driver using virtio.
3 *
4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5 */
6 //#define DEBUG
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/module.h>
11 #include <linux/virtio.h>
12 #include <linux/virtio_net.h>
13 #include <linux/bpf.h>
14 #include <linux/bpf_trace.h>
15 #include <linux/scatterlist.h>
16 #include <linux/if_vlan.h>
17 #include <linux/slab.h>
18 #include <linux/cpu.h>
19 #include <linux/average.h>
20 #include <linux/filter.h>
21 #include <linux/kernel.h>
22 #include <linux/dim.h>
23 #include <net/route.h>
24 #include <net/xdp.h>
25 #include <net/net_failover.h>
26 #include <net/netdev_rx_queue.h>
27 #include <net/netdev_queues.h>
28 #include <net/xdp_sock_drv.h>
29
30 static int napi_weight = NAPI_POLL_WEIGHT;
31 module_param(napi_weight, int, 0444);
32
33 static bool csum = true, gso = true, napi_tx = true;
34 module_param(csum, bool, 0444);
35 module_param(gso, bool, 0444);
36 module_param(napi_tx, bool, 0644);
37
38 /* FIXME: MTU in config. */
39 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
40 #define GOOD_COPY_LEN 128
41
42 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
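/* A sketch of the small-mode RX buffer layout implied by the code in
 * receive_small_build_skb()/receive_small() below:
 * [ VIRTNET_RX_PAD + xdp_headroom | virtio-net header | packet data ... ]
 */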
43
44 /* Separating two types of XDP xmit */
45 #define VIRTIO_XDP_TX BIT(0)
46 #define VIRTIO_XDP_REDIR BIT(1)
47
48 /* RX packet size EWMA. The average packet size is used to determine the packet
49 * buffer size when refilling RX rings. As the entire RX ring may be refilled
50 * at once, the weight is chosen so that the EWMA will be insensitive to short-
51 * term, transient changes in packet size.
52 */
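/* Assuming the DECLARE_EWMA(name, precision, weight_rcp) semantics of
 * <linux/average.h>, this gives roughly avg = (63 * avg + new_len) / 64 per
 * sample, with 0 extra bits of fractional precision.
 */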
53 DECLARE_EWMA(pkt_len, 0, 64)
54
55 #define VIRTNET_DRIVER_VERSION "1.0.0"
56
57 static const unsigned long guest_offloads[] = {
58 VIRTIO_NET_F_GUEST_TSO4,
59 VIRTIO_NET_F_GUEST_TSO6,
60 VIRTIO_NET_F_GUEST_ECN,
61 VIRTIO_NET_F_GUEST_UFO,
62 VIRTIO_NET_F_GUEST_CSUM,
63 VIRTIO_NET_F_GUEST_USO4,
64 VIRTIO_NET_F_GUEST_USO6,
65 VIRTIO_NET_F_GUEST_HDRLEN
66 };
67
68 #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
69 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
70 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
71 (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
72 (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
73 (1ULL << VIRTIO_NET_F_GUEST_USO6))
74
75 struct virtnet_stat_desc {
76 char desc[ETH_GSTRING_LEN];
77 size_t offset;
78 size_t qstat_offset;
79 };
80
81 struct virtnet_sq_free_stats {
82 u64 packets;
83 u64 bytes;
84 u64 napi_packets;
85 u64 napi_bytes;
86 u64 xsk;
87 };
88
89 struct virtnet_sq_stats {
90 struct u64_stats_sync syncp;
91 u64_stats_t packets;
92 u64_stats_t bytes;
93 u64_stats_t xdp_tx;
94 u64_stats_t xdp_tx_drops;
95 u64_stats_t kicks;
96 u64_stats_t tx_timeouts;
97 u64_stats_t stop;
98 u64_stats_t wake;
99 };
100
101 struct virtnet_rq_stats {
102 struct u64_stats_sync syncp;
103 u64_stats_t packets;
104 u64_stats_t bytes;
105 u64_stats_t drops;
106 u64_stats_t xdp_packets;
107 u64_stats_t xdp_tx;
108 u64_stats_t xdp_redirects;
109 u64_stats_t xdp_drops;
110 u64_stats_t kicks;
111 };
112
113 #define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
114 #define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
115
116 #define VIRTNET_SQ_STAT_QSTAT(name, m) \
117 { \
118 name, \
119 offsetof(struct virtnet_sq_stats, m), \
120 offsetof(struct netdev_queue_stats_tx, m), \
121 }
122
123 #define VIRTNET_RQ_STAT_QSTAT(name, m) \
124 { \
125 name, \
126 offsetof(struct virtnet_rq_stats, m), \
127 offsetof(struct netdev_queue_stats_rx, m), \
128 }
129
130 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
131 VIRTNET_SQ_STAT("xdp_tx", xdp_tx),
132 VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
133 VIRTNET_SQ_STAT("kicks", kicks),
134 VIRTNET_SQ_STAT("tx_timeouts", tx_timeouts),
135 };
136
137 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
138 VIRTNET_RQ_STAT("drops", drops),
139 VIRTNET_RQ_STAT("xdp_packets", xdp_packets),
140 VIRTNET_RQ_STAT("xdp_tx", xdp_tx),
141 VIRTNET_RQ_STAT("xdp_redirects", xdp_redirects),
142 VIRTNET_RQ_STAT("xdp_drops", xdp_drops),
143 VIRTNET_RQ_STAT("kicks", kicks),
144 };
145
146 static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
147 VIRTNET_SQ_STAT_QSTAT("packets", packets),
148 VIRTNET_SQ_STAT_QSTAT("bytes", bytes),
149 VIRTNET_SQ_STAT_QSTAT("stop", stop),
150 VIRTNET_SQ_STAT_QSTAT("wake", wake),
151 };
152
153 static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
154 VIRTNET_RQ_STAT_QSTAT("packets", packets),
155 VIRTNET_RQ_STAT_QSTAT("bytes", bytes),
156 };
157
158 #define VIRTNET_STATS_DESC_CQ(name) \
159 {#name, offsetof(struct virtio_net_stats_cvq, name), -1}
160
161 #define VIRTNET_STATS_DESC_RX(class, name) \
162 {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
163
164 #define VIRTNET_STATS_DESC_TX(class, name) \
165 {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
166
167
168 static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
169 VIRTNET_STATS_DESC_CQ(command_num),
170 VIRTNET_STATS_DESC_CQ(ok_num),
171 };
172
173 static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
174 VIRTNET_STATS_DESC_RX(basic, packets),
175 VIRTNET_STATS_DESC_RX(basic, bytes),
176
177 VIRTNET_STATS_DESC_RX(basic, notifications),
178 VIRTNET_STATS_DESC_RX(basic, interrupts),
179 };
180
181 static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
182 VIRTNET_STATS_DESC_TX(basic, packets),
183 VIRTNET_STATS_DESC_TX(basic, bytes),
184
185 VIRTNET_STATS_DESC_TX(basic, notifications),
186 VIRTNET_STATS_DESC_TX(basic, interrupts),
187 };
188
189 static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
190 VIRTNET_STATS_DESC_RX(csum, needs_csum),
191 };
192
193 static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
194 VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
195 VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
196 };
197
198 static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
199 VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
200 };
201
202 static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
203 VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
204 };
205
206 #define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field) \
207 { \
208 #name, \
209 offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), \
210 offsetof(struct netdev_queue_stats_rx, qstat_field), \
211 }
212
213 #define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field) \
214 { \
215 #name, \
216 offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), \
217 offsetof(struct netdev_queue_stats_tx, qstat_field), \
218 }
219
220 static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] = {
221 VIRTNET_STATS_DESC_RX_QSTAT(basic, drops, hw_drops),
222 VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns),
223 };
224
225 static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] = {
226 VIRTNET_STATS_DESC_TX_QSTAT(basic, drops, hw_drops),
227 VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors),
228 };
229
230 static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] = {
231 VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
232 VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none, csum_none),
233 VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad, csum_bad),
234 };
235
236 static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] = {
237 VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none, csum_none),
238 VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
239 };
240
241 static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] = {
242 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets, hw_gro_packets),
243 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes, hw_gro_bytes),
244 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
245 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced, hw_gro_wire_bytes),
246 };
247
248 static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] = {
249 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets, hw_gso_packets),
250 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes, hw_gso_bytes),
251 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments, hw_gso_wire_packets),
252 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
253 };
254
255 static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] = {
256 VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
257 };
258
259 static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
260 VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
261 };
262
263 #define VIRTNET_Q_TYPE_RX 0
264 #define VIRTNET_Q_TYPE_TX 1
265 #define VIRTNET_Q_TYPE_CQ 2
266
267 struct virtnet_interrupt_coalesce {
268 u32 max_packets;
269 u32 max_usecs;
270 };
271
272 /* The dma information of pages allocated at a time. */
273 struct virtnet_rq_dma {
274 dma_addr_t addr;
275 u32 ref;
276 u16 len;
277 u16 need_sync;
278 };
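/* One such struct sits at the head of each page used for RX buffer
 * allocation; the DMA-mapped region starts right after it, and @ref counts
 * the buffers carved from the page that still reference the mapping (see
 * virtnet_rq_alloc() and virtnet_rq_unmap() below).
 */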
279
280 /* Internal representation of a send virtqueue */
281 struct send_queue {
282 /* Virtqueue associated with this send_queue */
283 struct virtqueue *vq;
284
285 /* TX: fragments + linear part + virtio header */
286 struct scatterlist sg[MAX_SKB_FRAGS + 2];
287
288 /* Name of the send queue: output.$index */
289 char name[16];
290
291 struct virtnet_sq_stats stats;
292
293 struct virtnet_interrupt_coalesce intr_coal;
294
295 struct napi_struct napi;
296
297 /* Record whether sq is in reset state. */
298 bool reset;
299
300 struct xsk_buff_pool *xsk_pool;
301
302 dma_addr_t xsk_hdr_dma_addr;
303 };
304
305 /* Internal representation of a receive virtqueue */
306 struct receive_queue {
307 /* Virtqueue associated with this receive_queue */
308 struct virtqueue *vq;
309
310 struct napi_struct napi;
311
312 struct bpf_prog __rcu *xdp_prog;
313
314 struct virtnet_rq_stats stats;
315
316 /* The number of rx notifications */
317 u16 calls;
318
319 /* Is dynamic interrupt moderation enabled? */
320 bool dim_enabled;
321
322 /* Used to protect dim_enabled and intr_coal */
323 struct mutex dim_lock;
324
325 /* Dynamic Interrupt Moderation */
326 struct dim dim;
327
328 u32 packets_in_napi;
329
330 struct virtnet_interrupt_coalesce intr_coal;
331
332 /* Chain pages by the private ptr. */
333 struct page *pages;
334
335 /* Average packet length for mergeable receive buffers. */
336 struct ewma_pkt_len mrg_avg_pkt_len;
337
338 /* Page frag for packet buffer allocation. */
339 struct page_frag alloc_frag;
340
341 /* RX: fragments + linear part + virtio header */
342 struct scatterlist sg[MAX_SKB_FRAGS + 2];
343
344 /* Min single buffer size for mergeable buffers case. */
345 unsigned int min_buf_len;
346
347 /* Name of this receive queue: input.$index */
348 char name[16];
349
350 struct xdp_rxq_info xdp_rxq;
351
352 /* Record the last dma info to free after new pages are allocated. */
353 struct virtnet_rq_dma *last_dma;
354
355 struct xsk_buff_pool *xsk_pool;
356
357 /* xdp rxq used by xsk */
358 struct xdp_rxq_info xsk_rxq_info;
359
360 struct xdp_buff **xsk_buffs;
361 };
362
363 #define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
364
365 /* Control VQ buffers: protected by the rtnl lock */
366 struct control_buf {
367 struct virtio_net_ctrl_hdr hdr;
368 virtio_net_ctrl_ack status;
369 };
370
371 struct virtnet_info {
372 struct virtio_device *vdev;
373 struct virtqueue *cvq;
374 struct net_device *dev;
375 struct send_queue *sq;
376 struct receive_queue *rq;
377 unsigned int status;
378
379 /* Max # of queue pairs supported by the device */
380 u16 max_queue_pairs;
381
382 /* # of queue pairs currently used by the driver */
383 u16 curr_queue_pairs;
384
385 /* # of XDP queue pairs currently used by the driver */
386 u16 xdp_queue_pairs;
387
388 /* xdp_queue_pairs may be 0 when XDP is already loaded, so track XDP enablement here. */
389 bool xdp_enabled;
390
391 /* I like... big packets and I cannot lie! */
392 bool big_packets;
393
394 /* number of sg entries allocated for big packets */
395 unsigned int big_packets_num_skbfrags;
396
397 /* Host will merge rx buffers for big packets (shake it! shake it!) */
398 bool mergeable_rx_bufs;
399
400 /* Host supports rss and/or hash report */
401 bool has_rss;
402 bool has_rss_hash_report;
403 u8 rss_key_size;
404 u16 rss_indir_table_size;
405 u32 rss_hash_types_supported;
406 u32 rss_hash_types_saved;
407 struct virtio_net_rss_config_hdr *rss_hdr;
408 struct virtio_net_rss_config_trailer rss_trailer;
409 u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
410
411 /* Has control virtqueue */
412 bool has_cvq;
413
414 /* Lock to protect the control VQ */
415 struct mutex cvq_lock;
416
417 /* Host can handle any s/g split between our header and packet data */
418 bool any_header_sg;
419
420 /* Packet virtio header size */
421 u8 hdr_len;
422
423 /* Work struct for delayed refilling if we run low on memory. */
424 struct delayed_work refill;
425
426 /* Is delayed refill enabled? */
427 bool refill_enabled;
428
429 /* The lock to synchronize the access to refill_enabled */
430 spinlock_t refill_lock;
431
432 /* Work struct for config space updates */
433 struct work_struct config_work;
434
435 /* Work struct for setting rx mode */
436 struct work_struct rx_mode_work;
437
438 /* OK to queue work setting RX mode? */
439 bool rx_mode_work_enabled;
440
441 /* Is the affinity hint set for virtqueues? */
442 bool affinity_hint_set;
443
444 /* CPU hotplug instances for online & dead */
445 struct hlist_node node;
446 struct hlist_node node_dead;
447
448 struct control_buf *ctrl;
449
450 /* Ethtool settings */
451 u8 duplex;
452 u32 speed;
453
454 /* Is rx dynamic interrupt moderation enabled? */
455 bool rx_dim_enabled;
456
457 /* Interrupt coalescing settings */
458 struct virtnet_interrupt_coalesce intr_coal_tx;
459 struct virtnet_interrupt_coalesce intr_coal_rx;
460
461 unsigned long guest_offloads;
462 unsigned long guest_offloads_capable;
463
464 /* failover when STANDBY feature enabled */
465 struct failover *failover;
466
467 u64 device_stats_cap;
468 };
469
470 struct padded_vnet_hdr {
471 struct virtio_net_hdr_v1_hash hdr;
472 /*
473 * hdr is in a separate sg buffer, and the data sg buffer shares the same
474 * page with this header sg. The padding makes the next sg 16-byte aligned
475 * after the header.
476 */
477 char padding[12];
478 };
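/* As a rough size check (assuming the current 20-byte layout of struct
 * virtio_net_hdr_v1_hash in the UAPI header), 20 + 12 bytes of padding
 * rounds the header area up to 32 bytes, which keeps the following data sg
 * 16-byte aligned.
 */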
479
480 struct virtio_net_common_hdr {
481 union {
482 struct virtio_net_hdr hdr;
483 struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
484 struct virtio_net_hdr_v1_hash hash_v1_hdr;
485 };
486 };
487
488 static struct virtio_net_common_hdr xsk_hdr;
489
490 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
491 static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
492 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
493 struct net_device *dev,
494 unsigned int *xdp_xmit,
495 struct virtnet_rq_stats *stats);
496 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
497 struct sk_buff *skb, u8 flags);
498 static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
499 struct sk_buff *curr_skb,
500 struct page *page, void *buf,
501 int len, int truesize);
502 static void virtnet_xsk_completed(struct send_queue *sq, int num);
503
504 enum virtnet_xmit_type {
505 VIRTNET_XMIT_TYPE_SKB,
506 VIRTNET_XMIT_TYPE_SKB_ORPHAN,
507 VIRTNET_XMIT_TYPE_XDP,
508 VIRTNET_XMIT_TYPE_XSK,
509 };
510
511 static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi)
512 {
513 u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1;
514
515 return struct_size(vi->rss_hdr, indirection_table, indir_table_size);
516 }
517
518 static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi)
519 {
520 return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size);
521 }
522
523 /* We use the last two bits of the pointer to distinguish the xmit type. */
524 #define VIRTNET_XMIT_TYPE_MASK (BIT(0) | BIT(1))
525
526 #define VIRTIO_XSK_FLAG_OFFSET 2
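/* A sketch of what the helpers below do: an skb pointer p is queued as
 * (p | VIRTNET_XMIT_TYPE_SKB) and recovered by masking off the two tag
 * bits. For XSK there is no pointer at all, so the descriptor length is
 * stored above the tag instead:
 * ptr = (len << VIRTIO_XSK_FLAG_OFFSET) | VIRTNET_XMIT_TYPE_XSK.
 */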
527
528 static enum virtnet_xmit_type virtnet_xmit_ptr_unpack(void **ptr)
529 {
530 unsigned long p = (unsigned long)*ptr;
531
532 *ptr = (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);
533
534 return p & VIRTNET_XMIT_TYPE_MASK;
535 }
536
537 static void *virtnet_xmit_ptr_pack(void *ptr, enum virtnet_xmit_type type)
538 {
539 return (void *)((unsigned long)ptr | type);
540 }
541
542 static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
543 enum virtnet_xmit_type type)
544 {
545 return virtqueue_add_outbuf(sq->vq, sq->sg, num,
546 virtnet_xmit_ptr_pack(data, type),
547 GFP_ATOMIC);
548 }
549
550 static u32 virtnet_ptr_to_xsk_buff_len(void *ptr)
551 {
552 return ((unsigned long)ptr) >> VIRTIO_XSK_FLAG_OFFSET;
553 }
554
555 static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
556 {
557 sg_dma_address(sg) = addr;
558 sg_dma_len(sg) = len;
559 }
560
561 static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
562 bool in_napi, struct virtnet_sq_free_stats *stats)
563 {
564 struct xdp_frame *frame;
565 struct sk_buff *skb;
566 unsigned int len;
567 void *ptr;
568
569 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
570 switch (virtnet_xmit_ptr_unpack(&ptr)) {
571 case VIRTNET_XMIT_TYPE_SKB:
572 skb = ptr;
573
574 pr_debug("Sent skb %p\n", skb);
575 stats->napi_packets++;
576 stats->napi_bytes += skb->len;
577 napi_consume_skb(skb, in_napi);
578 break;
579
580 case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
581 skb = ptr;
582
583 stats->packets++;
584 stats->bytes += skb->len;
585 napi_consume_skb(skb, in_napi);
586 break;
587
588 case VIRTNET_XMIT_TYPE_XDP:
589 frame = ptr;
590
591 stats->packets++;
592 stats->bytes += xdp_get_frame_len(frame);
593 xdp_return_frame(frame);
594 break;
595
596 case VIRTNET_XMIT_TYPE_XSK:
597 stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr);
598 stats->xsk++;
599 break;
600 }
601 }
602 netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
603 }
604
605 static void virtnet_free_old_xmit(struct send_queue *sq,
606 struct netdev_queue *txq,
607 bool in_napi,
608 struct virtnet_sq_free_stats *stats)
609 {
610 __free_old_xmit(sq, txq, in_napi, stats);
611
612 if (stats->xsk)
613 virtnet_xsk_completed(sq, stats->xsk);
614 }
615
616 /* Converting between virtqueue no. and kernel tx/rx queue no.
617 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
618 */
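/* For example, with 4 queue pairs: vqs 0/2/4/6 are rx0..rx3, vqs 1/3/5/7
 * are tx0..tx3, and vq 8 (max_queue_pairs * 2) is the control vq.
 */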
619 static int vq2txq(struct virtqueue *vq)
620 {
621 return (vq->index - 1) / 2;
622 }
623
624 static int txq2vq(int txq)
625 {
626 return txq * 2 + 1;
627 }
628
629 static int vq2rxq(struct virtqueue *vq)
630 {
631 return vq->index / 2;
632 }
633
634 static int rxq2vq(int rxq)
635 {
636 return rxq * 2;
637 }
638
639 static int vq_type(struct virtnet_info *vi, int qid)
640 {
641 if (qid == vi->max_queue_pairs * 2)
642 return VIRTNET_Q_TYPE_CQ;
643
644 if (qid % 2)
645 return VIRTNET_Q_TYPE_TX;
646
647 return VIRTNET_Q_TYPE_RX;
648 }
649
650 static inline struct virtio_net_common_hdr *
651 skb_vnet_common_hdr(struct sk_buff *skb)
652 {
653 return (struct virtio_net_common_hdr *)skb->cb;
654 }
655
656 /*
657 * page->private is used to chain pages for big packets; put the whole
658 * most recently used list at the beginning for reuse
659 */
660 static void give_pages(struct receive_queue *rq, struct page *page)
661 {
662 struct page *end;
663
664 /* Find end of list, sew whole thing into vi->rq.pages. */
665 for (end = page; end->private; end = (struct page *)end->private);
666 end->private = (unsigned long)rq->pages;
667 rq->pages = page;
668 }
669
670 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
671 {
672 struct page *p = rq->pages;
673
674 if (p) {
675 rq->pages = (struct page *)p->private;
676 /* clear private here, it is used to chain pages */
677 p->private = 0;
678 } else
679 p = alloc_page(gfp_mask);
680 return p;
681 }
682
683 static void virtnet_rq_free_buf(struct virtnet_info *vi,
684 struct receive_queue *rq, void *buf)
685 {
686 if (vi->mergeable_rx_bufs)
687 put_page(virt_to_head_page(buf));
688 else if (vi->big_packets)
689 give_pages(rq, buf);
690 else
691 put_page(virt_to_head_page(buf));
692 }
693
694 static void enable_delayed_refill(struct virtnet_info *vi)
695 {
696 spin_lock_bh(&vi->refill_lock);
697 vi->refill_enabled = true;
698 spin_unlock_bh(&vi->refill_lock);
699 }
700
701 static void disable_delayed_refill(struct virtnet_info *vi)
702 {
703 spin_lock_bh(&vi->refill_lock);
704 vi->refill_enabled = false;
705 spin_unlock_bh(&vi->refill_lock);
706 }
707
708 static void enable_rx_mode_work(struct virtnet_info *vi)
709 {
710 rtnl_lock();
711 vi->rx_mode_work_enabled = true;
712 rtnl_unlock();
713 }
714
715 static void disable_rx_mode_work(struct virtnet_info *vi)
716 {
717 rtnl_lock();
718 vi->rx_mode_work_enabled = false;
719 rtnl_unlock();
720 }
721
722 static void virtqueue_napi_schedule(struct napi_struct *napi,
723 struct virtqueue *vq)
724 {
725 if (napi_schedule_prep(napi)) {
726 virtqueue_disable_cb(vq);
727 __napi_schedule(napi);
728 }
729 }
730
731 static bool virtqueue_napi_complete(struct napi_struct *napi,
732 struct virtqueue *vq, int processed)
733 {
734 int opaque;
735
736 opaque = virtqueue_enable_cb_prepare(vq);
737 if (napi_complete_done(napi, processed)) {
738 if (unlikely(virtqueue_poll(vq, opaque)))
739 virtqueue_napi_schedule(napi, vq);
740 else
741 return true;
742 } else {
743 virtqueue_disable_cb(vq);
744 }
745
746 return false;
747 }
748
749 static void skb_xmit_done(struct virtqueue *vq)
750 {
751 struct virtnet_info *vi = vq->vdev->priv;
752 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
753
754 /* Suppress further interrupts. */
755 virtqueue_disable_cb(vq);
756
757 if (napi->weight)
758 virtqueue_napi_schedule(napi, vq);
759 else
760 /* We were probably waiting for more output buffers. */
761 netif_wake_subqueue(vi->dev, vq2txq(vq));
762 }
763
764 #define MRG_CTX_HEADER_SHIFT 22
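/* The mergeable-buffer context packs two values into one pointer-sized
 * integer: truesize in the low 22 bits and headroom in the bits above,
 * i.e. ctx = (headroom << MRG_CTX_HEADER_SHIFT) | truesize, as the helpers
 * below encode and decode.
 */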
765 static void *mergeable_len_to_ctx(unsigned int truesize,
766 unsigned int headroom)
767 {
768 return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
769 }
770
771 static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
772 {
773 return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
774 }
775
776 static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
777 {
778 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
779 }
780
781 static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
782 unsigned int headroom,
783 unsigned int len)
784 {
785 struct sk_buff *skb;
786
787 skb = build_skb(buf, buflen);
788 if (unlikely(!skb))
789 return NULL;
790
791 skb_reserve(skb, headroom);
792 skb_put(skb, len);
793
794 return skb;
795 }
796
797 /* Called from bottom half context */
798 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
799 struct receive_queue *rq,
800 struct page *page, unsigned int offset,
801 unsigned int len, unsigned int truesize,
802 unsigned int headroom)
803 {
804 struct sk_buff *skb;
805 struct virtio_net_common_hdr *hdr;
806 unsigned int copy, hdr_len, hdr_padded_len;
807 struct page *page_to_free = NULL;
808 int tailroom, shinfo_size;
809 char *p, *hdr_p, *buf;
810
811 p = page_address(page) + offset;
812 hdr_p = p;
813
814 hdr_len = vi->hdr_len;
815 if (vi->mergeable_rx_bufs)
816 hdr_padded_len = hdr_len;
817 else
818 hdr_padded_len = sizeof(struct padded_vnet_hdr);
819
820 buf = p - headroom;
821 len -= hdr_len;
822 offset += hdr_padded_len;
823 p += hdr_padded_len;
824 tailroom = truesize - headroom - hdr_padded_len - len;
825
826 shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
827
828 if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
829 skb = virtnet_build_skb(buf, truesize, p - buf, len);
830 if (unlikely(!skb))
831 return NULL;
832
833 page = (struct page *)page->private;
834 if (page)
835 give_pages(rq, page);
836 goto ok;
837 }
838
839 /* copy small packet so we can reuse these pages for small data */
840 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
841 if (unlikely(!skb))
842 return NULL;
843
844 /* Copy the whole frame if it fits in skb->head, otherwise
845 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
846 */
847 if (len <= skb_tailroom(skb))
848 copy = len;
849 else
850 copy = ETH_HLEN;
851 skb_put_data(skb, p, copy);
852
853 len -= copy;
854 offset += copy;
855
856 if (vi->mergeable_rx_bufs) {
857 if (len)
858 skb_add_rx_frag(skb, 0, page, offset, len, truesize);
859 else
860 page_to_free = page;
861 goto ok;
862 }
863
864 /*
865 * Verify that we can indeed put this data into a skb.
866 * This is here to handle cases when the device erroneously
867 * tries to receive more than is possible. This is usually
868 * the case of a broken device.
869 */
870 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
871 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
872 dev_kfree_skb(skb);
873 return NULL;
874 }
875 BUG_ON(offset >= PAGE_SIZE);
876 while (len) {
877 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
878 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
879 frag_size, truesize);
880 len -= frag_size;
881 page = (struct page *)page->private;
882 offset = 0;
883 }
884
885 if (page)
886 give_pages(rq, page);
887
888 ok:
889 hdr = skb_vnet_common_hdr(skb);
890 memcpy(hdr, hdr_p, hdr_len);
891 if (page_to_free)
892 put_page(page_to_free);
893
894 return skb;
895 }
896
897 static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
898 {
899 struct virtnet_info *vi = rq->vq->vdev->priv;
900 struct page *page = virt_to_head_page(buf);
901 struct virtnet_rq_dma *dma;
902 void *head;
903 int offset;
904
905 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
906
907 head = page_address(page);
908
909 dma = head;
910
911 --dma->ref;
912
913 if (dma->need_sync && len) {
914 offset = buf - (head + sizeof(*dma));
915
916 virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
917 offset, len,
918 DMA_FROM_DEVICE);
919 }
920
921 if (dma->ref)
922 return;
923
924 virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
925 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
926 put_page(page);
927 }
928
929 static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
930 {
931 struct virtnet_info *vi = rq->vq->vdev->priv;
932 void *buf;
933
934 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
935
936 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
937 if (buf)
938 virtnet_rq_unmap(rq, buf, *len);
939
940 return buf;
941 }
942
943 static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
944 {
945 struct virtnet_info *vi = rq->vq->vdev->priv;
946 struct virtnet_rq_dma *dma;
947 dma_addr_t addr;
948 u32 offset;
949 void *head;
950
951 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
952
953 head = page_address(rq->alloc_frag.page);
954
955 offset = buf - head;
956
957 dma = head;
958
959 addr = dma->addr - sizeof(*dma) + offset;
960
961 sg_init_table(rq->sg, 1);
962 sg_fill_dma(rq->sg, addr, len);
963 }
964
965 static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
966 {
967 struct page_frag *alloc_frag = &rq->alloc_frag;
968 struct virtnet_info *vi = rq->vq->vdev->priv;
969 struct virtnet_rq_dma *dma;
970 void *buf, *head;
971 dma_addr_t addr;
972
973 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
974
975 head = page_address(alloc_frag->page);
976
977 dma = head;
978
979 /* new pages */
980 if (!alloc_frag->offset) {
981 if (rq->last_dma) {
982 /* Now that the new page is allocated, the last dma
983 * will not be used, so it can be unmapped
984 * if its ref is 0.
985 */
986 virtnet_rq_unmap(rq, rq->last_dma, 0);
987 rq->last_dma = NULL;
988 }
989
990 dma->len = alloc_frag->size - sizeof(*dma);
991
992 addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
993 dma->len, DMA_FROM_DEVICE, 0);
994 if (virtqueue_dma_mapping_error(rq->vq, addr))
995 return NULL;
996
997 dma->addr = addr;
998 dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
999
1000 /* Add a reference to dma to prevent the entire dma from
1001 * being released during error handling. This reference
1002 * will be freed after the pages are no longer used.
1003 */
1004 get_page(alloc_frag->page);
1005 dma->ref = 1;
1006 alloc_frag->offset = sizeof(*dma);
1007
1008 rq->last_dma = dma;
1009 }
1010
1011 ++dma->ref;
1012
1013 buf = head + alloc_frag->offset;
1014
1015 get_page(alloc_frag->page);
1016 alloc_frag->offset += size;
1017
1018 return buf;
1019 }
1020
1021 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
1022 {
1023 struct virtnet_info *vi = vq->vdev->priv;
1024 struct receive_queue *rq;
1025 int i = vq2rxq(vq);
1026
1027 rq = &vi->rq[i];
1028
1029 if (rq->xsk_pool) {
1030 xsk_buff_free((struct xdp_buff *)buf);
1031 return;
1032 }
1033
1034 if (!vi->big_packets || vi->mergeable_rx_bufs)
1035 virtnet_rq_unmap(rq, buf, 0);
1036
1037 virtnet_rq_free_buf(vi, rq, buf);
1038 }
1039
1040 static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
1041 bool in_napi)
1042 {
1043 struct virtnet_sq_free_stats stats = {0};
1044
1045 virtnet_free_old_xmit(sq, txq, in_napi, &stats);
1046
1047 /* Avoid overhead when no packets have been processed;
1048 * this happens when called speculatively from start_xmit.
1049 */
1050 if (!stats.packets && !stats.napi_packets)
1051 return;
1052
1053 u64_stats_update_begin(&sq->stats.syncp);
1054 u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes);
1055 u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets);
1056 u64_stats_update_end(&sq->stats.syncp);
1057 }
1058
1059 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1060 {
1061 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1062 return false;
1063 else if (q < vi->curr_queue_pairs)
1064 return true;
1065 else
1066 return false;
1067 }
1068
1069 static bool tx_may_stop(struct virtnet_info *vi,
1070 struct net_device *dev,
1071 struct send_queue *sq)
1072 {
1073 int qnum;
1074
1075 qnum = sq - vi->sq;
1076
1077 /* If running out of space, stop queue to avoid getting packets that we
1078 * are then unable to transmit.
1079 * An alternative would be to force queuing layer to requeue the skb by
1080 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
1081 * returned in a normal path of operation: it means that driver is not
1082 * maintaining the TX queue stop/start state properly, and causes
1083 * the stack to do a non-trivial amount of useless work.
1084 * Since most packets only take 1 or 2 ring slots, stopping the queue
1085 * early means 16 slots are typically wasted.
1086 */
1087 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1088 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1089
1090 netif_tx_stop_queue(txq);
1091 u64_stats_update_begin(&sq->stats.syncp);
1092 u64_stats_inc(&sq->stats.stop);
1093 u64_stats_update_end(&sq->stats.syncp);
1094
1095 return true;
1096 }
1097
1098 return false;
1099 }
1100
1101 static void check_sq_full_and_disable(struct virtnet_info *vi,
1102 struct net_device *dev,
1103 struct send_queue *sq)
1104 {
1105 bool use_napi = sq->napi.weight;
1106 int qnum;
1107
1108 qnum = sq - vi->sq;
1109
1110 if (tx_may_stop(vi, dev, sq)) {
1111 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1112
1113 if (use_napi) {
1114 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
1115 virtqueue_napi_schedule(&sq->napi, sq->vq);
1116 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1117 /* More just got used, free them then recheck. */
1118 free_old_xmit(sq, txq, false);
1119 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1120 netif_start_subqueue(dev, qnum);
1121 u64_stats_update_begin(&sq->stats.syncp);
1122 u64_stats_inc(&sq->stats.wake);
1123 u64_stats_update_end(&sq->stats.syncp);
1124 virtqueue_disable_cb(sq->vq);
1125 }
1126 }
1127 }
1128 }
1129
1130 static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
1131 struct receive_queue *rq, void *buf, u32 len)
1132 {
1133 struct xdp_buff *xdp;
1134 u32 bufsize;
1135
1136 xdp = (struct xdp_buff *)buf;
1137
1138 bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
1139
1140 if (unlikely(len > bufsize)) {
1141 pr_debug("%s: rx error: len %u exceeds truesize %u\n",
1142 vi->dev->name, len, bufsize);
1143 DEV_STATS_INC(vi->dev, rx_length_errors);
1144 xsk_buff_free(xdp);
1145 return NULL;
1146 }
1147
1148 xsk_buff_set_size(xdp, len);
1149 xsk_buff_dma_sync_for_cpu(xdp);
1150
1151 return xdp;
1152 }
1153
1154 static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
1155 struct xdp_buff *xdp)
1156 {
1157 unsigned int metasize = xdp->data - xdp->data_meta;
1158 struct sk_buff *skb;
1159 unsigned int size;
1160
1161 size = xdp->data_end - xdp->data_hard_start;
1162 skb = napi_alloc_skb(&rq->napi, size);
1163 if (unlikely(!skb)) {
1164 xsk_buff_free(xdp);
1165 return NULL;
1166 }
1167
1168 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
1169
1170 size = xdp->data_end - xdp->data_meta;
1171 memcpy(__skb_put(skb, size), xdp->data_meta, size);
1172
1173 if (metasize) {
1174 __skb_pull(skb, metasize);
1175 skb_metadata_set(skb, metasize);
1176 }
1177
1178 xsk_buff_free(xdp);
1179
1180 return skb;
1181 }
1182
1183 static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
1184 struct receive_queue *rq, struct xdp_buff *xdp,
1185 unsigned int *xdp_xmit,
1186 struct virtnet_rq_stats *stats)
1187 {
1188 struct bpf_prog *prog;
1189 u32 ret;
1190
1191 ret = XDP_PASS;
1192 rcu_read_lock();
1193 prog = rcu_dereference(rq->xdp_prog);
1194 if (prog)
1195 ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
1196 rcu_read_unlock();
1197
1198 switch (ret) {
1199 case XDP_PASS:
1200 return xsk_construct_skb(rq, xdp);
1201
1202 case XDP_TX:
1203 case XDP_REDIRECT:
1204 return NULL;
1205
1206 default:
1207 /* drop packet */
1208 xsk_buff_free(xdp);
1209 u64_stats_inc(&stats->drops);
1210 return NULL;
1211 }
1212 }
1213
1214 static void xsk_drop_follow_bufs(struct net_device *dev,
1215 struct receive_queue *rq,
1216 u32 num_buf,
1217 struct virtnet_rq_stats *stats)
1218 {
1219 struct xdp_buff *xdp;
1220 u32 len;
1221
1222 while (num_buf-- > 1) {
1223 xdp = virtqueue_get_buf(rq->vq, &len);
1224 if (unlikely(!xdp)) {
1225 pr_debug("%s: rx error: %d buffers missing\n",
1226 dev->name, num_buf);
1227 DEV_STATS_INC(dev, rx_length_errors);
1228 break;
1229 }
1230 u64_stats_add(&stats->bytes, len);
1231 xsk_buff_free(xdp);
1232 }
1233 }
1234
1235 static int xsk_append_merge_buffer(struct virtnet_info *vi,
1236 struct receive_queue *rq,
1237 struct sk_buff *head_skb,
1238 u32 num_buf,
1239 struct virtio_net_hdr_mrg_rxbuf *hdr,
1240 struct virtnet_rq_stats *stats)
1241 {
1242 struct sk_buff *curr_skb;
1243 struct xdp_buff *xdp;
1244 u32 len, truesize;
1245 struct page *page;
1246 void *buf;
1247
1248 curr_skb = head_skb;
1249
1250 while (--num_buf) {
1251 buf = virtqueue_get_buf(rq->vq, &len);
1252 if (unlikely(!buf)) {
1253 pr_debug("%s: rx error: %d buffers out of %d missing\n",
1254 vi->dev->name, num_buf,
1255 virtio16_to_cpu(vi->vdev,
1256 hdr->num_buffers));
1257 DEV_STATS_INC(vi->dev, rx_length_errors);
1258 return -EINVAL;
1259 }
1260
1261 u64_stats_add(&stats->bytes, len);
1262
1263 xdp = buf_to_xdp(vi, rq, buf, len);
1264 if (!xdp)
1265 goto err;
1266
1267 buf = napi_alloc_frag(len);
1268 if (!buf) {
1269 xsk_buff_free(xdp);
1270 goto err;
1271 }
1272
1273 memcpy(buf, xdp->data - vi->hdr_len, len);
1274
1275 xsk_buff_free(xdp);
1276
1277 page = virt_to_page(buf);
1278
1279 truesize = len;
1280
1281 curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
1282 buf, len, truesize);
1283 if (!curr_skb) {
1284 put_page(page);
1285 goto err;
1286 }
1287 }
1288
1289 return 0;
1290
1291 err:
1292 xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
1293 return -EINVAL;
1294 }
1295
1296 static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
1297 struct receive_queue *rq, struct xdp_buff *xdp,
1298 unsigned int *xdp_xmit,
1299 struct virtnet_rq_stats *stats)
1300 {
1301 struct virtio_net_hdr_mrg_rxbuf *hdr;
1302 struct bpf_prog *prog;
1303 struct sk_buff *skb;
1304 u32 ret, num_buf;
1305
1306 hdr = xdp->data - vi->hdr_len;
1307 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1308
1309 ret = XDP_PASS;
1310 rcu_read_lock();
1311 prog = rcu_dereference(rq->xdp_prog);
1312 /* TODO: support multi buffer. */
1313 if (prog && num_buf == 1)
1314 ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
1315 rcu_read_unlock();
1316
1317 switch (ret) {
1318 case XDP_PASS:
1319 skb = xsk_construct_skb(rq, xdp);
1320 if (!skb)
1321 goto drop_bufs;
1322
1323 if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
1324 dev_kfree_skb(skb);
1325 goto drop;
1326 }
1327
1328 return skb;
1329
1330 case XDP_TX:
1331 case XDP_REDIRECT:
1332 return NULL;
1333
1334 default:
1335 /* drop packet */
1336 xsk_buff_free(xdp);
1337 }
1338
1339 drop_bufs:
1340 xsk_drop_follow_bufs(dev, rq, num_buf, stats);
1341
1342 drop:
1343 u64_stats_inc(&stats->drops);
1344 return NULL;
1345 }
1346
1347 static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
1348 void *buf, u32 len,
1349 unsigned int *xdp_xmit,
1350 struct virtnet_rq_stats *stats)
1351 {
1352 struct net_device *dev = vi->dev;
1353 struct sk_buff *skb = NULL;
1354 struct xdp_buff *xdp;
1355 u8 flags;
1356
1357 len -= vi->hdr_len;
1358
1359 u64_stats_add(&stats->bytes, len);
1360
1361 xdp = buf_to_xdp(vi, rq, buf, len);
1362 if (!xdp)
1363 return;
1364
1365 if (unlikely(len < ETH_HLEN)) {
1366 pr_debug("%s: short packet %i\n", dev->name, len);
1367 DEV_STATS_INC(dev, rx_length_errors);
1368 xsk_buff_free(xdp);
1369 return;
1370 }
1371
1372 flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags;
1373
1374 if (!vi->mergeable_rx_bufs)
1375 skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
1376 else
1377 skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);
1378
1379 if (skb)
1380 virtnet_receive_done(vi, rq, skb, flags);
1381 }
1382
1383 static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
1384 struct xsk_buff_pool *pool, gfp_t gfp)
1385 {
1386 struct xdp_buff **xsk_buffs;
1387 dma_addr_t addr;
1388 int err = 0;
1389 u32 len, i;
1390 int num;
1391
1392 xsk_buffs = rq->xsk_buffs;
1393
1394 num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
1395 if (!num)
1396 return -ENOMEM;
1397
1398 len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
1399
1400 for (i = 0; i < num; ++i) {
1401 /* Use part of XDP_PACKET_HEADROOM as the virtnet hdr space.
1402 * We assume XDP_PACKET_HEADROOM is larger than hdr->len.
1403 * (see function virtnet_xsk_pool_enable)
1404 */
1405 addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
1406
1407 sg_init_table(rq->sg, 1);
1408 sg_fill_dma(rq->sg, addr, len);
1409
1410 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1,
1411 xsk_buffs[i], NULL, gfp);
1412 if (err)
1413 goto err;
1414 }
1415
1416 return num;
1417
1418 err:
1419 for (; i < num; ++i)
1420 xsk_buff_free(xsk_buffs[i]);
1421
1422 return err;
1423 }
1424
1425 static void *virtnet_xsk_to_ptr(u32 len)
1426 {
1427 unsigned long p;
1428
1429 p = len << VIRTIO_XSK_FLAG_OFFSET;
1430
1431 return virtnet_xmit_ptr_pack((void *)p, VIRTNET_XMIT_TYPE_XSK);
1432 }
1433
1434 static int virtnet_xsk_xmit_one(struct send_queue *sq,
1435 struct xsk_buff_pool *pool,
1436 struct xdp_desc *desc)
1437 {
1438 struct virtnet_info *vi;
1439 dma_addr_t addr;
1440
1441 vi = sq->vq->vdev->priv;
1442
1443 addr = xsk_buff_raw_get_dma(pool, desc->addr);
1444 xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);
1445
1446 sg_init_table(sq->sg, 2);
1447 sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);
1448 sg_fill_dma(sq->sg + 1, addr, desc->len);
1449
1450 return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2,
1451 virtnet_xsk_to_ptr(desc->len),
1452 GFP_ATOMIC);
1453 }
1454
1455 static int virtnet_xsk_xmit_batch(struct send_queue *sq,
1456 struct xsk_buff_pool *pool,
1457 unsigned int budget,
1458 u64 *kicks)
1459 {
1460 struct xdp_desc *descs = pool->tx_descs;
1461 bool kick = false;
1462 u32 nb_pkts, i;
1463 int err;
1464
1465 budget = min_t(u32, budget, sq->vq->num_free);
1466
1467 nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
1468 if (!nb_pkts)
1469 return 0;
1470
1471 for (i = 0; i < nb_pkts; i++) {
1472 err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
1473 if (unlikely(err)) {
1474 xsk_tx_completed(sq->xsk_pool, nb_pkts - i);
1475 break;
1476 }
1477
1478 kick = true;
1479 }
1480
1481 if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1482 (*kicks)++;
1483
1484 return i;
1485 }
1486
1487 static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
1488 int budget)
1489 {
1490 struct virtnet_info *vi = sq->vq->vdev->priv;
1491 struct virtnet_sq_free_stats stats = {};
1492 struct net_device *dev = vi->dev;
1493 u64 kicks = 0;
1494 int sent;
1495
1496 /* Avoid waking up NAPI needlessly, so call __free_old_xmit() instead of
1497 * free_old_xmit().
1498 */
1499 __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats);
1500
1501 if (stats.xsk)
1502 xsk_tx_completed(sq->xsk_pool, stats.xsk);
1503
1504 sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
1505
1506 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1507 check_sq_full_and_disable(vi, vi->dev, sq);
1508
1509 if (sent) {
1510 struct netdev_queue *txq;
1511
1512 txq = netdev_get_tx_queue(vi->dev, sq - vi->sq);
1513 txq_trans_cond_update(txq);
1514 }
1515
1516 u64_stats_update_begin(&sq->stats.syncp);
1517 u64_stats_add(&sq->stats.packets, stats.packets);
1518 u64_stats_add(&sq->stats.bytes, stats.bytes);
1519 u64_stats_add(&sq->stats.kicks, kicks);
1520 u64_stats_add(&sq->stats.xdp_tx, sent);
1521 u64_stats_update_end(&sq->stats.syncp);
1522
1523 if (xsk_uses_need_wakeup(pool))
1524 xsk_set_tx_need_wakeup(pool);
1525
1526 return sent;
1527 }
1528
1529 static void xsk_wakeup(struct send_queue *sq)
1530 {
1531 if (napi_if_scheduled_mark_missed(&sq->napi))
1532 return;
1533
1534 local_bh_disable();
1535 virtqueue_napi_schedule(&sq->napi, sq->vq);
1536 local_bh_enable();
1537 }
1538
1539 static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
1540 {
1541 struct virtnet_info *vi = netdev_priv(dev);
1542 struct send_queue *sq;
1543
1544 if (!netif_running(dev))
1545 return -ENETDOWN;
1546
1547 if (qid >= vi->curr_queue_pairs)
1548 return -EINVAL;
1549
1550 sq = &vi->sq[qid];
1551
1552 xsk_wakeup(sq);
1553 return 0;
1554 }
1555
1556 static void virtnet_xsk_completed(struct send_queue *sq, int num)
1557 {
1558 xsk_tx_completed(sq->xsk_pool, num);
1559
1560 /* When this is called from rx poll, start_xmit or xdp xmit, we should
1561 * wake up the tx napi to consume the xsk tx queue, because the tx
1562 * interrupt may not be triggered.
1563 */
1564 xsk_wakeup(sq);
1565 }
1566
1567 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
1568 struct send_queue *sq,
1569 struct xdp_frame *xdpf)
1570 {
1571 struct virtio_net_hdr_mrg_rxbuf *hdr;
1572 struct skb_shared_info *shinfo;
1573 u8 nr_frags = 0;
1574 int err, i;
1575
1576 if (unlikely(xdpf->headroom < vi->hdr_len))
1577 return -EOVERFLOW;
1578
1579 if (unlikely(xdp_frame_has_frags(xdpf))) {
1580 shinfo = xdp_get_shared_info_from_frame(xdpf);
1581 nr_frags = shinfo->nr_frags;
1582 }
1583
1584 /* In the wrapping function virtnet_xdp_xmit(), we need to free
1585 * up the pending old buffers, which requires locating
1586 * skb_shared_info in xdp_get_frame_len() and
1587 * xdp_return_frame(); both rely on xdpf->data and
1588 * xdpf->headroom. Therefore, we need to update the value of
1589 * headroom here, in step with the data pointer adjustment below.
1590 */
1591 xdpf->headroom -= vi->hdr_len;
1592 xdpf->data -= vi->hdr_len;
1593 /* Zero header and leave csum up to XDP layers */
1594 hdr = xdpf->data;
1595 memset(hdr, 0, vi->hdr_len);
1596 xdpf->len += vi->hdr_len;
1597
1598 sg_init_table(sq->sg, nr_frags + 1);
1599 sg_set_buf(sq->sg, xdpf->data, xdpf->len);
1600 for (i = 0; i < nr_frags; i++) {
1601 skb_frag_t *frag = &shinfo->frags[i];
1602
1603 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
1604 skb_frag_size(frag), skb_frag_off(frag));
1605 }
1606
1607 err = virtnet_add_outbuf(sq, nr_frags + 1, xdpf, VIRTNET_XMIT_TYPE_XDP);
1608 if (unlikely(err))
1609 return -ENOSPC; /* Caller handle free/refcnt */
1610
1611 return 0;
1612 }
1613
1614 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1615 * the current cpu, so it does not need to be locked.
1616 *
1617 * Here we use a macro instead of inline functions because we have to deal
1618 * with three issues at the same time: 1. the choice of sq; 2. deciding on
1619 * and performing the lock/unlock of txq; 3. keeping sparse happy. It is
1620 * difficult for two inline functions to solve all three problems at once.
1621 */
1622 #define virtnet_xdp_get_sq(vi) ({ \
1623 int cpu = smp_processor_id(); \
1624 struct netdev_queue *txq; \
1625 typeof(vi) v = (vi); \
1626 unsigned int qp; \
1627 \
1628 if (v->curr_queue_pairs > nr_cpu_ids) { \
1629 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
1630 qp += cpu; \
1631 txq = netdev_get_tx_queue(v->dev, qp); \
1632 __netif_tx_acquire(txq); \
1633 } else { \
1634 qp = cpu % v->curr_queue_pairs; \
1635 txq = netdev_get_tx_queue(v->dev, qp); \
1636 __netif_tx_lock(txq, cpu); \
1637 } \
1638 v->sq + qp; \
1639 })
1640
1641 #define virtnet_xdp_put_sq(vi, q) { \
1642 struct netdev_queue *txq; \
1643 typeof(vi) v = (vi); \
1644 \
1645 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
1646 if (v->curr_queue_pairs > nr_cpu_ids) \
1647 __netif_tx_release(txq); \
1648 else \
1649 __netif_tx_unlock(txq); \
1650 }
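/* Typical usage, as in virtnet_xdp_xmit() below:
 *	sq = virtnet_xdp_get_sq(vi);
 *	... queue XDP frames on sq ...
 *	virtnet_xdp_put_sq(vi, sq);
 */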
1651
1652 static int virtnet_xdp_xmit(struct net_device *dev,
1653 int n, struct xdp_frame **frames, u32 flags)
1654 {
1655 struct virtnet_info *vi = netdev_priv(dev);
1656 struct virtnet_sq_free_stats stats = {0};
1657 struct receive_queue *rq = vi->rq;
1658 struct bpf_prog *xdp_prog;
1659 struct send_queue *sq;
1660 int nxmit = 0;
1661 int kicks = 0;
1662 int ret;
1663 int i;
1664
1665 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
1666 * indicates that XDP resources have been successfully allocated.
1667 */
1668 xdp_prog = rcu_access_pointer(rq->xdp_prog);
1669 if (!xdp_prog)
1670 return -ENXIO;
1671
1672 sq = virtnet_xdp_get_sq(vi);
1673
1674 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
1675 ret = -EINVAL;
1676 goto out;
1677 }
1678
1679 /* Free up any pending old buffers before queueing new ones. */
1680 virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
1681 false, &stats);
1682
1683 for (i = 0; i < n; i++) {
1684 struct xdp_frame *xdpf = frames[i];
1685
1686 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
1687 break;
1688 nxmit++;
1689 }
1690 ret = nxmit;
1691
1692 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1693 check_sq_full_and_disable(vi, dev, sq);
1694
1695 if (flags & XDP_XMIT_FLUSH) {
1696 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1697 kicks = 1;
1698 }
1699 out:
1700 u64_stats_update_begin(&sq->stats.syncp);
1701 u64_stats_add(&sq->stats.bytes, stats.bytes);
1702 u64_stats_add(&sq->stats.packets, stats.packets);
1703 u64_stats_add(&sq->stats.xdp_tx, n);
1704 u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
1705 u64_stats_add(&sq->stats.kicks, kicks);
1706 u64_stats_update_end(&sq->stats.syncp);
1707
1708 virtnet_xdp_put_sq(vi, sq);
1709 return ret;
1710 }
1711
1712 static void put_xdp_frags(struct xdp_buff *xdp)
1713 {
1714 struct skb_shared_info *shinfo;
1715 struct page *xdp_page;
1716 int i;
1717
1718 if (xdp_buff_has_frags(xdp)) {
1719 shinfo = xdp_get_shared_info_from_buff(xdp);
1720 for (i = 0; i < shinfo->nr_frags; i++) {
1721 xdp_page = skb_frag_page(&shinfo->frags[i]);
1722 put_page(xdp_page);
1723 }
1724 }
1725 }
1726
1727 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
1728 struct net_device *dev,
1729 unsigned int *xdp_xmit,
1730 struct virtnet_rq_stats *stats)
1731 {
1732 struct xdp_frame *xdpf;
1733 int err;
1734 u32 act;
1735
1736 act = bpf_prog_run_xdp(xdp_prog, xdp);
1737 u64_stats_inc(&stats->xdp_packets);
1738
1739 switch (act) {
1740 case XDP_PASS:
1741 return act;
1742
1743 case XDP_TX:
1744 u64_stats_inc(&stats->xdp_tx);
1745 xdpf = xdp_convert_buff_to_frame(xdp);
1746 if (unlikely(!xdpf)) {
1747 netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1748 return XDP_DROP;
1749 }
1750
1751 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1752 if (unlikely(!err)) {
1753 xdp_return_frame_rx_napi(xdpf);
1754 } else if (unlikely(err < 0)) {
1755 trace_xdp_exception(dev, xdp_prog, act);
1756 return XDP_DROP;
1757 }
1758 *xdp_xmit |= VIRTIO_XDP_TX;
1759 return act;
1760
1761 case XDP_REDIRECT:
1762 u64_stats_inc(&stats->xdp_redirects);
1763 err = xdp_do_redirect(dev, xdp, xdp_prog);
1764 if (err)
1765 return XDP_DROP;
1766
1767 *xdp_xmit |= VIRTIO_XDP_REDIR;
1768 return act;
1769
1770 default:
1771 bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
1772 fallthrough;
1773 case XDP_ABORTED:
1774 trace_xdp_exception(dev, xdp_prog, act);
1775 fallthrough;
1776 case XDP_DROP:
1777 return XDP_DROP;
1778 }
1779 }
1780
1781 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1782 {
1783 return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
1784 }
1785
1786 /* We copy the packet for XDP in the following cases:
1787 *
1788 * 1) Packet is scattered across multiple rx buffers.
1789 * 2) Headroom space is insufficient.
1790 *
1791 * This is inefficient but it's a temporary condition that
1792 * we hit right after XDP is enabled and until queue is refilled
1793 * with large buffers with sufficient headroom - so it should affect
1794 * at most queue size packets.
1795 * Afterwards, the conditions to enable
1796 * XDP should preclude the underlying device from sending packets
1797 * across multiple buffers (num_buf > 1), and we make sure buffers
1798 * have enough headroom.
1799 */
1800 static struct page *xdp_linearize_page(struct receive_queue *rq,
1801 int *num_buf,
1802 struct page *p,
1803 int offset,
1804 int page_off,
1805 unsigned int *len)
1806 {
1807 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1808 struct page *page;
1809
1810 if (page_off + *len + tailroom > PAGE_SIZE)
1811 return NULL;
1812
1813 page = alloc_page(GFP_ATOMIC);
1814 if (!page)
1815 return NULL;
1816
1817 memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
1818 page_off += *len;
1819
1820 while (--*num_buf) {
1821 unsigned int buflen;
1822 void *buf;
1823 int off;
1824
1825 buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1826 if (unlikely(!buf))
1827 goto err_buf;
1828
1829 p = virt_to_head_page(buf);
1830 off = buf - page_address(p);
1831
1832 /* guard against a misconfigured or uncooperative backend that
1833 * is sending packets larger than the MTU.
1834 */
1835 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
1836 put_page(p);
1837 goto err_buf;
1838 }
1839
1840 memcpy(page_address(page) + page_off,
1841 page_address(p) + off, buflen);
1842 page_off += buflen;
1843 put_page(p);
1844 }
1845
1846 /* Headroom does not contribute to packet length */
1847 *len = page_off - XDP_PACKET_HEADROOM;
1848 return page;
1849 err_buf:
1850 __free_pages(page, 0);
1851 return NULL;
1852 }
1853
1854 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1855 unsigned int xdp_headroom,
1856 void *buf,
1857 unsigned int len)
1858 {
1859 unsigned int header_offset;
1860 unsigned int headroom;
1861 unsigned int buflen;
1862 struct sk_buff *skb;
1863
1864 header_offset = VIRTNET_RX_PAD + xdp_headroom;
1865 headroom = vi->hdr_len + header_offset;
1866 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1867 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1868
1869 skb = virtnet_build_skb(buf, buflen, headroom, len);
1870 if (unlikely(!skb))
1871 return NULL;
1872
1873 buf += header_offset;
1874 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1875
1876 return skb;
1877 }
1878
1879 static struct sk_buff *receive_small_xdp(struct net_device *dev,
1880 struct virtnet_info *vi,
1881 struct receive_queue *rq,
1882 struct bpf_prog *xdp_prog,
1883 void *buf,
1884 unsigned int xdp_headroom,
1885 unsigned int len,
1886 unsigned int *xdp_xmit,
1887 struct virtnet_rq_stats *stats)
1888 {
1889 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
1890 unsigned int headroom = vi->hdr_len + header_offset;
1891 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1892 struct page *page = virt_to_head_page(buf);
1893 struct page *xdp_page;
1894 unsigned int buflen;
1895 struct xdp_buff xdp;
1896 struct sk_buff *skb;
1897 unsigned int metasize = 0;
1898 u32 act;
1899
1900 if (unlikely(hdr->hdr.gso_type))
1901 goto err_xdp;
1902
1903 /* Partially checksummed packets must be dropped. */
1904 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
1905 goto err_xdp;
1906
1907 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1908 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1909
1910 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
1911 int offset = buf - page_address(page) + header_offset;
1912 unsigned int tlen = len + vi->hdr_len;
1913 int num_buf = 1;
1914
1915 xdp_headroom = virtnet_get_headroom(vi);
1916 header_offset = VIRTNET_RX_PAD + xdp_headroom;
1917 headroom = vi->hdr_len + header_offset;
1918 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1919 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1920 xdp_page = xdp_linearize_page(rq, &num_buf, page,
1921 offset, header_offset,
1922 &tlen);
1923 if (!xdp_page)
1924 goto err_xdp;
1925
1926 buf = page_address(xdp_page);
1927 put_page(page);
1928 page = xdp_page;
1929 }
1930
1931 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1932 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1933 xdp_headroom, len, true);
1934
1935 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1936
1937 switch (act) {
1938 case XDP_PASS:
1939 /* Recalculate length in case bpf program changed it */
1940 len = xdp.data_end - xdp.data;
1941 metasize = xdp.data - xdp.data_meta;
1942 break;
1943
1944 case XDP_TX:
1945 case XDP_REDIRECT:
1946 goto xdp_xmit;
1947
1948 default:
1949 goto err_xdp;
1950 }
1951
1952 skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
1953 if (unlikely(!skb))
1954 goto err;
1955
1956 if (metasize)
1957 skb_metadata_set(skb, metasize);
1958
1959 return skb;
1960
1961 err_xdp:
1962 u64_stats_inc(&stats->xdp_drops);
1963 err:
1964 u64_stats_inc(&stats->drops);
1965 put_page(page);
1966 xdp_xmit:
1967 return NULL;
1968 }
1969
1970 static struct sk_buff *receive_small(struct net_device *dev,
1971 struct virtnet_info *vi,
1972 struct receive_queue *rq,
1973 void *buf, void *ctx,
1974 unsigned int len,
1975 unsigned int *xdp_xmit,
1976 struct virtnet_rq_stats *stats)
1977 {
1978 unsigned int xdp_headroom = (unsigned long)ctx;
1979 struct page *page = virt_to_head_page(buf);
1980 struct sk_buff *skb;
1981
1982 /* We passed the address of virtnet header to virtio-core,
1983 * so truncate the padding.
1984 */
1985 buf -= VIRTNET_RX_PAD + xdp_headroom;
1986
1987 len -= vi->hdr_len;
1988 u64_stats_add(&stats->bytes, len);
1989
1990 if (unlikely(len > GOOD_PACKET_LEN)) {
1991 pr_debug("%s: rx error: len %u exceeds max size %d\n",
1992 dev->name, len, GOOD_PACKET_LEN);
1993 DEV_STATS_INC(dev, rx_length_errors);
1994 goto err;
1995 }
1996
1997 if (unlikely(vi->xdp_enabled)) {
1998 struct bpf_prog *xdp_prog;
1999
2000 rcu_read_lock();
2001 xdp_prog = rcu_dereference(rq->xdp_prog);
2002 if (xdp_prog) {
2003 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
2004 xdp_headroom, len, xdp_xmit,
2005 stats);
2006 rcu_read_unlock();
2007 return skb;
2008 }
2009 rcu_read_unlock();
2010 }
2011
2012 skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
2013 if (likely(skb))
2014 return skb;
2015
2016 err:
2017 u64_stats_inc(&stats->drops);
2018 put_page(page);
2019 return NULL;
2020 }
2021
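/* Big mode: the packet was received into a chain of pages, so convert
 * that chain into an skb; on failure the pages are returned to the
 * receive queue's page list.
 */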
2022 static struct sk_buff *receive_big(struct net_device *dev,
2023 struct virtnet_info *vi,
2024 struct receive_queue *rq,
2025 void *buf,
2026 unsigned int len,
2027 struct virtnet_rq_stats *stats)
2028 {
2029 struct page *page = buf;
2030 struct sk_buff *skb =
2031 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
2032
2033 u64_stats_add(&stats->bytes, len - vi->hdr_len);
2034 if (unlikely(!skb))
2035 goto err;
2036
2037 return skb;
2038
2039 err:
2040 u64_stats_inc(&stats->drops);
2041 give_pages(rq, page);
2042 return NULL;
2043 }
2044
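/* Error-path helper for mergeable mode: pull the remaining buffers of
 * a multi-buffer frame off the virtqueue and drop their page
 * references so nothing is leaked after a partial receive.
 */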
2045 static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
2046 struct net_device *dev,
2047 struct virtnet_rq_stats *stats)
2048 {
2049 struct page *page;
2050 void *buf;
2051 int len;
2052
2053 while (num_buf-- > 1) {
2054 buf = virtnet_rq_get_buf(rq, &len, NULL);
2055 if (unlikely(!buf)) {
2056 pr_debug("%s: rx error: %d buffers missing\n",
2057 dev->name, num_buf);
2058 DEV_STATS_INC(dev, rx_length_errors);
2059 break;
2060 }
2061 u64_stats_add(&stats->bytes, len);
2062 page = virt_to_head_page(buf);
2063 put_page(page);
2064 }
2065 }
2066
2067 /* Why not use xdp_build_skb_from_frame()?
2068 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
2069 * virtio-net there are 2 points that do not match its requirements:
2070 * 1. The size of the prefilled buffer is not fixed before xdp is set.
2071 * 2. xdp_build_skb_from_frame() does more checks that we don't need,
2072 * like eth_type_trans() (which virtio-net does in receive_buf()).
2073 */
2074 static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
2075 struct virtnet_info *vi,
2076 struct xdp_buff *xdp,
2077 unsigned int xdp_frags_truesz)
2078 {
2079 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2080 unsigned int headroom, data_len;
2081 struct sk_buff *skb;
2082 int metasize;
2083 u8 nr_frags;
2084
2085 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
2086 pr_debug("Error building skb as missing reserved tailroom for xdp");
2087 return NULL;
2088 }
2089
2090 if (unlikely(xdp_buff_has_frags(xdp)))
2091 nr_frags = sinfo->nr_frags;
2092
2093 skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
2094 if (unlikely(!skb))
2095 return NULL;
2096
2097 headroom = xdp->data - xdp->data_hard_start;
2098 data_len = xdp->data_end - xdp->data;
2099 skb_reserve(skb, headroom);
2100 __skb_put(skb, data_len);
2101
2102 metasize = xdp->data - xdp->data_meta;
2103 metasize = metasize > 0 ? metasize : 0;
2104 if (metasize)
2105 skb_metadata_set(skb, metasize);
2106
2107 if (unlikely(xdp_buff_has_frags(xdp)))
2108 xdp_update_skb_shared_info(skb, nr_frags,
2109 sinfo->xdp_frags_size,
2110 xdp_frags_truesz,
2111 xdp_buff_is_frag_pfmemalloc(xdp));
2112
2113 return skb;
2114 }
2115
2116 /* TODO: build xdp in big mode */
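/* Assemble a (possibly multi-buffer) xdp_buff from mergeable receive
 * buffers: the first buffer becomes the linear area, any further
 * buffers are attached as frags in the shared info, and their total
 * truesize is reported back through @xdp_frags_truesize.
 */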
2117 static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
2118 struct virtnet_info *vi,
2119 struct receive_queue *rq,
2120 struct xdp_buff *xdp,
2121 void *buf,
2122 unsigned int len,
2123 unsigned int frame_sz,
2124 int *num_buf,
2125 unsigned int *xdp_frags_truesize,
2126 struct virtnet_rq_stats *stats)
2127 {
2128 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2129 unsigned int headroom, tailroom, room;
2130 unsigned int truesize, cur_frag_size;
2131 struct skb_shared_info *shinfo;
2132 unsigned int xdp_frags_truesz = 0;
2133 struct page *page;
2134 skb_frag_t *frag;
2135 int offset;
2136 void *ctx;
2137
2138 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
2139 xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM,
2140 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
2141
2142 if (!*num_buf)
2143 return 0;
2144
2145 if (*num_buf > 1) {
2146 /* If we want to build a multi-buffer xdp_buff, we need
2147 * to set the XDP_FLAGS_HAS_FRAGS bit in the xdp_buff
2148 * flags.
2149 */
2150 if (!xdp_buff_has_frags(xdp))
2151 xdp_buff_set_frags_flag(xdp);
2152
2153 shinfo = xdp_get_shared_info_from_buff(xdp);
2154 shinfo->nr_frags = 0;
2155 shinfo->xdp_frags_size = 0;
2156 }
2157
2158 if (*num_buf > MAX_SKB_FRAGS + 1)
2159 return -EINVAL;
2160
2161 while (--*num_buf > 0) {
2162 buf = virtnet_rq_get_buf(rq, &len, &ctx);
2163 if (unlikely(!buf)) {
2164 pr_debug("%s: rx error: %d buffers out of %d missing\n",
2165 dev->name, *num_buf,
2166 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
2167 DEV_STATS_INC(dev, rx_length_errors);
2168 goto err;
2169 }
2170
2171 u64_stats_add(&stats->bytes, len);
2172 page = virt_to_head_page(buf);
2173 offset = buf - page_address(page);
2174
2175 truesize = mergeable_ctx_to_truesize(ctx);
2176 headroom = mergeable_ctx_to_headroom(ctx);
2177 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2178 room = SKB_DATA_ALIGN(headroom + tailroom);
2179
2180 cur_frag_size = truesize;
2181 xdp_frags_truesz += cur_frag_size;
2182 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
2183 put_page(page);
2184 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
2185 dev->name, len, (unsigned long)(truesize - room));
2186 DEV_STATS_INC(dev, rx_length_errors);
2187 goto err;
2188 }
2189
2190 frag = &shinfo->frags[shinfo->nr_frags++];
2191 skb_frag_fill_page_desc(frag, page, offset, len);
2192 if (page_is_pfmemalloc(page))
2193 xdp_buff_set_frag_pfmemalloc(xdp);
2194
2195 shinfo->xdp_frags_size += len;
2196 }
2197
2198 *xdp_frags_truesize = xdp_frags_truesz;
2199 return 0;
2200
2201 err:
2202 put_xdp_frags(xdp);
2203 return -EINVAL;
2204 }
2205
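/* Prepare the head buffer of a mergeable frame for XDP. If the
 * prefilled buffer already has enough headroom (and either the frame
 * fits in one buffer or the program supports frags), it is used in
 * place; otherwise the data is copied into a freshly allocated page
 * with XDP_PACKET_HEADROOM reserved, updating *page and *frame_sz.
 */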
2206 static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
2207 struct receive_queue *rq,
2208 struct bpf_prog *xdp_prog,
2209 void *ctx,
2210 unsigned int *frame_sz,
2211 int *num_buf,
2212 struct page **page,
2213 int offset,
2214 unsigned int *len,
2215 struct virtio_net_hdr_mrg_rxbuf *hdr)
2216 {
2217 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
2218 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
2219 struct page *xdp_page;
2220 unsigned int xdp_room;
2221
2222 /* Transient failure which in theory could occur if
2223 * in-flight packets from before XDP was enabled reach
2224 * the receive path after XDP is loaded.
2225 */
2226 if (unlikely(hdr->hdr.gso_type))
2227 return NULL;
2228
2229 /* Partially checksummed packets must be dropped. */
2230 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
2231 return NULL;
2232
2233 /* Now XDP core assumes frag size is PAGE_SIZE, but buffers
2234 * with headroom may add a hole in truesize, which
2235 * makes their length exceed PAGE_SIZE. So we disable the
2236 * hole mechanism for xdp. See add_recvbuf_mergeable().
2237 */
2238 *frame_sz = truesize;
2239
2240 if (likely(headroom >= virtnet_get_headroom(vi) &&
2241 (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
2242 return page_address(*page) + offset;
2243 }
2244
2245 /* This happens when headroom is not enough because
2246 * the buffer was prefilled before XDP was set.
2247 * This should only happen for the first several packets.
2248 * In fact, vq reset can be used here to help us clean up
2249 * the prefilled buffers, but many existing devices do not
2250 * support it, and we don't want to bother users who are
2251 * using xdp normally.
2252 */
2253 if (!xdp_prog->aux->xdp_has_frags) {
2254 /* linearize data for XDP */
2255 xdp_page = xdp_linearize_page(rq, num_buf,
2256 *page, offset,
2257 XDP_PACKET_HEADROOM,
2258 len);
2259 if (!xdp_page)
2260 return NULL;
2261 } else {
2262 xdp_room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
2263 sizeof(struct skb_shared_info));
2264 if (*len + xdp_room > PAGE_SIZE)
2265 return NULL;
2266
2267 xdp_page = alloc_page(GFP_ATOMIC);
2268 if (!xdp_page)
2269 return NULL;
2270
2271 memcpy(page_address(xdp_page) + XDP_PACKET_HEADROOM,
2272 page_address(*page) + offset, *len);
2273 }
2274
2275 *frame_sz = PAGE_SIZE;
2276
2277 put_page(*page);
2278
2279 *page = xdp_page;
2280
2281 return page_address(*page) + XDP_PACKET_HEADROOM;
2282 }
2283
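/* Mergeable receive with an XDP program attached: obtain a buffer
 * that satisfies XDP's layout requirements, build an xdp_buff (with
 * frags if needed), run the program, and either return the frame as
 * an skb (XDP_PASS), let it be transmitted/redirected, or drop it and
 * free any remaining buffers of the frame.
 */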
2284 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
2285 struct virtnet_info *vi,
2286 struct receive_queue *rq,
2287 struct bpf_prog *xdp_prog,
2288 void *buf,
2289 void *ctx,
2290 unsigned int len,
2291 unsigned int *xdp_xmit,
2292 struct virtnet_rq_stats *stats)
2293 {
2294 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2295 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
2296 struct page *page = virt_to_head_page(buf);
2297 int offset = buf - page_address(page);
2298 unsigned int xdp_frags_truesz = 0;
2299 struct sk_buff *head_skb;
2300 unsigned int frame_sz;
2301 struct xdp_buff xdp;
2302 void *data;
2303 u32 act;
2304 int err;
2305
2306 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
2307 offset, &len, hdr);
2308 if (unlikely(!data))
2309 goto err_xdp;
2310
2311 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
2312 &num_buf, &xdp_frags_truesz, stats);
2313 if (unlikely(err))
2314 goto err_xdp;
2315
2316 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
2317
2318 switch (act) {
2319 case XDP_PASS:
2320 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
2321 if (unlikely(!head_skb))
2322 break;
2323 return head_skb;
2324
2325 case XDP_TX:
2326 case XDP_REDIRECT:
2327 return NULL;
2328
2329 default:
2330 break;
2331 }
2332
2333 put_xdp_frags(&xdp);
2334
2335 err_xdp:
2336 put_page(page);
2337 mergeable_buf_free(rq, num_buf, dev, stats);
2338
2339 u64_stats_inc(&stats->xdp_drops);
2340 u64_stats_inc(&stats->drops);
2341 return NULL;
2342 }
2343
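/* Append one receive buffer to the skb being assembled. Once the
 * current skb holds MAX_SKB_FRAGS fragments, a new skb is chained via
 * frag_list; adjacent buffers from the same page are coalesced into
 * the previous fragment when possible.
 */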
2344 static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
2345 struct sk_buff *curr_skb,
2346 struct page *page, void *buf,
2347 int len, int truesize)
2348 {
2349 int num_skb_frags;
2350 int offset;
2351
2352 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
2353 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
2354 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
2355
2356 if (unlikely(!nskb))
2357 return NULL;
2358
2359 if (curr_skb == head_skb)
2360 skb_shinfo(curr_skb)->frag_list = nskb;
2361 else
2362 curr_skb->next = nskb;
2363 curr_skb = nskb;
2364 head_skb->truesize += nskb->truesize;
2365 num_skb_frags = 0;
2366 }
2367
2368 if (curr_skb != head_skb) {
2369 head_skb->data_len += len;
2370 head_skb->len += len;
2371 head_skb->truesize += truesize;
2372 }
2373
2374 offset = buf - page_address(page);
2375 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
2376 put_page(page);
2377 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
2378 len, truesize);
2379 } else {
2380 skb_add_rx_frag(curr_skb, num_skb_frags, page,
2381 offset, len, truesize);
2382 }
2383
2384 return curr_skb;
2385 }
2386
2387 static struct sk_buff *receive_mergeable(struct net_device *dev,
2388 struct virtnet_info *vi,
2389 struct receive_queue *rq,
2390 void *buf,
2391 void *ctx,
2392 unsigned int len,
2393 unsigned int *xdp_xmit,
2394 struct virtnet_rq_stats *stats)
2395 {
2396 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2397 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
2398 struct page *page = virt_to_head_page(buf);
2399 int offset = buf - page_address(page);
2400 struct sk_buff *head_skb, *curr_skb;
2401 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
2402 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
2403 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2404 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
2405
2406 head_skb = NULL;
2407 u64_stats_add(&stats->bytes, len - vi->hdr_len);
2408
2409 if (unlikely(len > truesize - room)) {
2410 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
2411 dev->name, len, (unsigned long)(truesize - room));
2412 DEV_STATS_INC(dev, rx_length_errors);
2413 goto err_skb;
2414 }
2415
2416 if (unlikely(vi->xdp_enabled)) {
2417 struct bpf_prog *xdp_prog;
2418
2419 rcu_read_lock();
2420 xdp_prog = rcu_dereference(rq->xdp_prog);
2421 if (xdp_prog) {
2422 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
2423 len, xdp_xmit, stats);
2424 rcu_read_unlock();
2425 return head_skb;
2426 }
2427 rcu_read_unlock();
2428 }
2429
2430 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
2431 curr_skb = head_skb;
2432
2433 if (unlikely(!curr_skb))
2434 goto err_skb;
2435 while (--num_buf) {
2436 buf = virtnet_rq_get_buf(rq, &len, &ctx);
2437 if (unlikely(!buf)) {
2438 pr_debug("%s: rx error: %d buffers out of %d missing\n",
2439 dev->name, num_buf,
2440 virtio16_to_cpu(vi->vdev,
2441 hdr->num_buffers));
2442 DEV_STATS_INC(dev, rx_length_errors);
2443 goto err_buf;
2444 }
2445
2446 u64_stats_add(&stats->bytes, len);
2447 page = virt_to_head_page(buf);
2448
2449 truesize = mergeable_ctx_to_truesize(ctx);
2450 headroom = mergeable_ctx_to_headroom(ctx);
2451 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2452 room = SKB_DATA_ALIGN(headroom + tailroom);
2453 if (unlikely(len > truesize - room)) {
2454 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
2455 dev->name, len, (unsigned long)(truesize - room));
2456 DEV_STATS_INC(dev, rx_length_errors);
2457 goto err_skb;
2458 }
2459
2460 curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
2461 buf, len, truesize);
2462 if (!curr_skb)
2463 goto err_skb;
2464 }
2465
2466 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
2467 return head_skb;
2468
2469 err_skb:
2470 put_page(page);
2471 mergeable_buf_free(rq, num_buf, dev, stats);
2472
2473 err_buf:
2474 u64_stats_inc(&stats->drops);
2475 dev_kfree_skb(head_skb);
2476 return NULL;
2477 }
2478
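/* Translate the device's hash report into the kernel's
 * PKT_HASH_TYPE_* classification and record the hash in the skb.
 */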
2479 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
2480 struct sk_buff *skb)
2481 {
2482 enum pkt_hash_types rss_hash_type;
2483
2484 if (!hdr_hash || !skb)
2485 return;
2486
2487 switch (__le16_to_cpu(hdr_hash->hash_report)) {
2488 case VIRTIO_NET_HASH_REPORT_TCPv4:
2489 case VIRTIO_NET_HASH_REPORT_UDPv4:
2490 case VIRTIO_NET_HASH_REPORT_TCPv6:
2491 case VIRTIO_NET_HASH_REPORT_UDPv6:
2492 case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
2493 case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
2494 rss_hash_type = PKT_HASH_TYPE_L4;
2495 break;
2496 case VIRTIO_NET_HASH_REPORT_IPv4:
2497 case VIRTIO_NET_HASH_REPORT_IPv6:
2498 case VIRTIO_NET_HASH_REPORT_IPv6_EX:
2499 rss_hash_type = PKT_HASH_TYPE_L3;
2500 break;
2501 case VIRTIO_NET_HASH_REPORT_NONE:
2502 default:
2503 rss_hash_type = PKT_HASH_TYPE_NONE;
2504 }
2505 skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
2506 }
2507
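/* Final per-packet receive processing: apply the RSS hash report,
 * honour VIRTIO_NET_HDR_F_DATA_VALID, convert the virtio-net header
 * into skb checksum/GSO state and hand the skb to GRO.
 */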
2508 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
2509 struct sk_buff *skb, u8 flags)
2510 {
2511 struct virtio_net_common_hdr *hdr;
2512 struct net_device *dev = vi->dev;
2513
2514 hdr = skb_vnet_common_hdr(skb);
2515 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
2516 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
2517
2518 if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
2519 skb->ip_summed = CHECKSUM_UNNECESSARY;
2520
2521 if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
2522 virtio_is_little_endian(vi->vdev))) {
2523 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
2524 dev->name, hdr->hdr.gso_type,
2525 hdr->hdr.gso_size);
2526 goto frame_err;
2527 }
2528
2529 skb_record_rx_queue(skb, vq2rxq(rq->vq));
2530 skb->protocol = eth_type_trans(skb, dev);
2531 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
2532 ntohs(skb->protocol), skb->len, skb->pkt_type);
2533
2534 napi_gro_receive(&rq->napi, skb);
2535 return;
2536
2537 frame_err:
2538 DEV_STATS_INC(dev, rx_frame_errors);
2539 dev_kfree_skb(skb);
2540 }
2541
2542 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
2543 void *buf, unsigned int len, void **ctx,
2544 unsigned int *xdp_xmit,
2545 struct virtnet_rq_stats *stats)
2546 {
2547 struct net_device *dev = vi->dev;
2548 struct sk_buff *skb;
2549 u8 flags;
2550
2551 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
2552 pr_debug("%s: short packet %i\n", dev->name, len);
2553 DEV_STATS_INC(dev, rx_length_errors);
2554 virtnet_rq_free_buf(vi, rq, buf);
2555 return;
2556 }
2557
2558 /* 1. Save the flags early, as the XDP program might overwrite them.
2559 * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
2560 * stay valid after XDP processing.
2561 * 2. XDP doesn't work with partially checksummed packets (refer to
2562 * virtnet_xdp_set()), so packets marked as
2563 * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
2564 */
2565 flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
2566
2567 if (vi->mergeable_rx_bufs)
2568 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
2569 stats);
2570 else if (vi->big_packets)
2571 skb = receive_big(dev, vi, rq, buf, len, stats);
2572 else
2573 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
2574
2575 if (unlikely(!skb))
2576 return;
2577
2578 virtnet_receive_done(vi, rq, skb, flags);
2579 }
2580
2581 /* Unlike mergeable buffers, all buffers are allocated to the
2582 * same size, except for the headroom. For this reason we do
2583 * not need to use mergeable_len_to_ctx here - it is enough
2584 * to store the headroom as the context ignoring the truesize.
2585 */
2586 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
2587 gfp_t gfp)
2588 {
2589 char *buf;
2590 unsigned int xdp_headroom = virtnet_get_headroom(vi);
2591 void *ctx = (void *)(unsigned long)xdp_headroom;
2592 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
2593 int err;
2594
2595 len = SKB_DATA_ALIGN(len) +
2596 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2597
2598 if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp)))
2599 return -ENOMEM;
2600
2601 buf = virtnet_rq_alloc(rq, len, gfp);
2602 if (unlikely(!buf))
2603 return -ENOMEM;
2604
2605 buf += VIRTNET_RX_PAD + xdp_headroom;
2606
2607 virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
2608
2609 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
2610 if (err < 0) {
2611 virtnet_rq_unmap(rq, buf, 0);
2612 put_page(virt_to_head_page(buf));
2613 }
2614
2615 return err;
2616 }
2617
2618 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
2619 gfp_t gfp)
2620 {
2621 struct page *first, *list = NULL;
2622 char *p;
2623 int i, err, offset;
2624
2625 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
2626
2627 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
2628 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
2629 first = get_a_page(rq, gfp);
2630 if (!first) {
2631 if (list)
2632 give_pages(rq, list);
2633 return -ENOMEM;
2634 }
2635 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
2636
2637 /* chain new page in list head to match sg */
2638 first->private = (unsigned long)list;
2639 list = first;
2640 }
2641
2642 first = get_a_page(rq, gfp);
2643 if (!first) {
2644 give_pages(rq, list);
2645 return -ENOMEM;
2646 }
2647 p = page_address(first);
2648
2649 /* rq->sg[0], rq->sg[1] share the same page */
2650 /* a separate rq->sg[0] for header - required in case !any_header_sg */
2651 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
2652
2653 /* rq->sg[1] for data packet, from offset */
2654 offset = sizeof(struct padded_vnet_hdr);
2655 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
2656
2657 /* chain first in list head */
2658 first->private = (unsigned long)list;
2659 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
2660 first, gfp);
2661 if (err < 0)
2662 give_pages(rq, first);
2663
2664 return err;
2665 }
2666
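/* Choose the length of the next mergeable receive buffer. When XDP
 * headroom is reserved (room != 0), a full page minus that room is
 * used; otherwise hdr_len plus the EWMA of recent packet sizes
 * (clamped to [min_buf_len, PAGE_SIZE - hdr_len]) is used, rounded up
 * to the cache line size.
 */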
2667 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
2668 struct ewma_pkt_len *avg_pkt_len,
2669 unsigned int room)
2670 {
2671 struct virtnet_info *vi = rq->vq->vdev->priv;
2672 const size_t hdr_len = vi->hdr_len;
2673 unsigned int len;
2674
2675 if (room)
2676 return PAGE_SIZE - room;
2677
2678 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
2679 rq->min_buf_len, PAGE_SIZE - hdr_len);
2680
2681 return ALIGN(len, L1_CACHE_BYTES);
2682 }
2683
2684 static int add_recvbuf_mergeable(struct virtnet_info *vi,
2685 struct receive_queue *rq, gfp_t gfp)
2686 {
2687 struct page_frag *alloc_frag = &rq->alloc_frag;
2688 unsigned int headroom = virtnet_get_headroom(vi);
2689 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2690 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
2691 unsigned int len, hole;
2692 void *ctx;
2693 char *buf;
2694 int err;
2695
2696 /* Extra tailroom is needed to satisfy XDP's assumption. This
2697 * means rx frags coalescing won't work, but considering we've
2698 * disabled GSO for XDP, it won't be a big issue.
2699 */
2700 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
2701
2702 if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
2703 return -ENOMEM;
2704
2705 if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size)
2706 len -= sizeof(struct virtnet_rq_dma);
2707
2708 buf = virtnet_rq_alloc(rq, len + room, gfp);
2709 if (unlikely(!buf))
2710 return -ENOMEM;
2711
2712 buf += headroom; /* advance address leaving hole at front of pkt */
2713 hole = alloc_frag->size - alloc_frag->offset;
2714 if (hole < len + room) {
2715 /* To avoid internal fragmentation, if there is very likely not
2716 * enough space for another buffer, add the remaining space to
2717 * the current buffer.
2718 * XDP core assumes that frame_size of xdp_buff and the length
2719 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
2720 */
2721 if (!headroom)
2722 len += hole;
2723 alloc_frag->offset += hole;
2724 }
2725
2726 virtnet_rq_init_one_sg(rq, buf, len);
2727
2728 ctx = mergeable_len_to_ctx(len + room, headroom);
2729 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
2730 if (err < 0) {
2731 virtnet_rq_unmap(rq, buf, 0);
2732 put_page(virt_to_head_page(buf));
2733 }
2734
2735 return err;
2736 }
2737
2738 /*
2739 * Returns false if we couldn't fill entirely (OOM).
2740 *
2741 * Normally run in the receive path, but can also be run from ndo_open
2742 * before we're receiving packets, or from refill_work which is
2743 * careful to disable receiving (using napi_disable).
2744 */
2745 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
2746 gfp_t gfp)
2747 {
2748 int err;
2749
2750 if (rq->xsk_pool) {
2751 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp);
2752 goto kick;
2753 }
2754
2755 do {
2756 if (vi->mergeable_rx_bufs)
2757 err = add_recvbuf_mergeable(vi, rq, gfp);
2758 else if (vi->big_packets)
2759 err = add_recvbuf_big(vi, rq, gfp);
2760 else
2761 err = add_recvbuf_small(vi, rq, gfp);
2762
2763 if (err)
2764 break;
2765 } while (rq->vq->num_free);
2766
2767 kick:
2768 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2769 unsigned long flags;
2770
2771 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2772 u64_stats_inc(&rq->stats.kicks);
2773 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2774 }
2775
2776 return err != -ENOMEM;
2777 }
2778
2779 static void skb_recv_done(struct virtqueue *rvq)
2780 {
2781 struct virtnet_info *vi = rvq->vdev->priv;
2782 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2783
2784 rq->calls++;
2785 virtqueue_napi_schedule(&rq->napi, rvq);
2786 }
2787
2788 static void virtnet_napi_do_enable(struct virtqueue *vq,
2789 struct napi_struct *napi)
2790 {
2791 napi_enable(napi);
2792
2793 /* If all buffers were filled by the other side before we enabled napi, we
2794 * won't get another interrupt, so process any outstanding packets now.
2795 * Call local_bh_enable after to trigger softIRQ processing.
2796 */
2797 local_bh_disable();
2798 virtqueue_napi_schedule(napi, vq);
2799 local_bh_enable();
2800 }
2801
2802 static void virtnet_napi_enable(struct receive_queue *rq)
2803 {
2804 struct virtnet_info *vi = rq->vq->vdev->priv;
2805 int qidx = vq2rxq(rq->vq);
2806
2807 virtnet_napi_do_enable(rq->vq, &rq->napi);
2808 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
2809 }
2810
2811 static void virtnet_napi_tx_enable(struct send_queue *sq)
2812 {
2813 struct virtnet_info *vi = sq->vq->vdev->priv;
2814 struct napi_struct *napi = &sq->napi;
2815 int qidx = vq2txq(sq->vq);
2816
2817 if (!napi->weight)
2818 return;
2819
2820 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2821 * enable the feature if this is likely affine with the transmit path.
2822 */
2823 if (!vi->affinity_hint_set) {
2824 napi->weight = 0;
2825 return;
2826 }
2827
2828 virtnet_napi_do_enable(sq->vq, napi);
2829 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
2830 }
2831
2832 static void virtnet_napi_tx_disable(struct send_queue *sq)
2833 {
2834 struct virtnet_info *vi = sq->vq->vdev->priv;
2835 struct napi_struct *napi = &sq->napi;
2836 int qidx = vq2txq(sq->vq);
2837
2838 if (napi->weight) {
2839 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
2840 napi_disable(napi);
2841 }
2842 }
2843
2844 static void virtnet_napi_disable(struct receive_queue *rq)
2845 {
2846 struct virtnet_info *vi = rq->vq->vdev->priv;
2847 struct napi_struct *napi = &rq->napi;
2848 int qidx = vq2rxq(rq->vq);
2849
2850 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
2851 napi_disable(napi);
2852 }
2853
2854 static void refill_work(struct work_struct *work)
2855 {
2856 struct virtnet_info *vi =
2857 container_of(work, struct virtnet_info, refill.work);
2858 bool still_empty;
2859 int i;
2860
2861 for (i = 0; i < vi->curr_queue_pairs; i++) {
2862 struct receive_queue *rq = &vi->rq[i];
2863
2864 /*
2865 * When queue API support is added in the future and the call
2866 * below becomes napi_disable_locked, this driver will need to
2867 * be refactored.
2868 *
2869 * One possible solution would be to:
2870 * - cancel refill_work with cancel_delayed_work (note:
2871 * non-sync)
2872 * - cancel refill_work with cancel_delayed_work_sync in
2873 * virtnet_remove after the netdev is unregistered
2874 * - wrap all of the work in a lock (perhaps the netdev
2875 * instance lock)
2876 * - check netif_running() and return early to avoid a race
2877 */
2878 napi_disable(&rq->napi);
2879 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2880 virtnet_napi_do_enable(rq->vq, &rq->napi);
2881
2882 /* In theory, this can happen: if we don't get any buffers in,
2883 * we will *never* try to fill again.
2884 */
2885 if (still_empty)
2886 schedule_delayed_work(&vi->refill, HZ/2);
2887 }
2888 }
2889
2890 static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
2891 struct receive_queue *rq,
2892 int budget,
2893 unsigned int *xdp_xmit,
2894 struct virtnet_rq_stats *stats)
2895 {
2896 unsigned int len;
2897 int packets = 0;
2898 void *buf;
2899
2900 while (packets < budget) {
2901 buf = virtqueue_get_buf(rq->vq, &len);
2902 if (!buf)
2903 break;
2904
2905 virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats);
2906 packets++;
2907 }
2908
2909 return packets;
2910 }
2911
2912 static int virtnet_receive_packets(struct virtnet_info *vi,
2913 struct receive_queue *rq,
2914 int budget,
2915 unsigned int *xdp_xmit,
2916 struct virtnet_rq_stats *stats)
2917 {
2918 unsigned int len;
2919 int packets = 0;
2920 void *buf;
2921
2922 if (!vi->big_packets || vi->mergeable_rx_bufs) {
2923 void *ctx;
2924 while (packets < budget &&
2925 (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2926 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats);
2927 packets++;
2928 }
2929 } else {
2930 while (packets < budget &&
2931 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
2932 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
2933 packets++;
2934 }
2935 }
2936
2937 return packets;
2938 }
2939
2940 static int virtnet_receive(struct receive_queue *rq, int budget,
2941 unsigned int *xdp_xmit)
2942 {
2943 struct virtnet_info *vi = rq->vq->vdev->priv;
2944 struct virtnet_rq_stats stats = {};
2945 int i, packets;
2946
2947 if (rq->xsk_pool)
2948 packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats);
2949 else
2950 packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
2951
2952 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2953 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2954 spin_lock(&vi->refill_lock);
2955 if (vi->refill_enabled)
2956 schedule_delayed_work(&vi->refill, 0);
2957 spin_unlock(&vi->refill_lock);
2958 }
2959 }
2960
2961 u64_stats_set(&stats.packets, packets);
2962 u64_stats_update_begin(&rq->stats.syncp);
2963 for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
2964 size_t offset = virtnet_rq_stats_desc[i].offset;
2965 u64_stats_t *item, *src;
2966
2967 item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2968 src = (u64_stats_t *)((u8 *)&stats + offset);
2969 u64_stats_add(item, u64_stats_read(src));
2970 }
2971
2972 u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets));
2973 u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes));
2974
2975 u64_stats_update_end(&rq->stats.syncp);
2976
2977 return packets;
2978 }
2979
2980 static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
2981 {
2982 struct virtnet_info *vi = rq->vq->vdev->priv;
2983 unsigned int index = vq2rxq(rq->vq);
2984 struct send_queue *sq = &vi->sq[index];
2985 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2986
2987 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2988 return;
2989
2990 if (__netif_tx_trylock(txq)) {
2991 if (sq->reset) {
2992 __netif_tx_unlock(txq);
2993 return;
2994 }
2995
2996 do {
2997 virtqueue_disable_cb(sq->vq);
2998 free_old_xmit(sq, txq, !!budget);
2999 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
3000
3001 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
3002 if (netif_tx_queue_stopped(txq)) {
3003 u64_stats_update_begin(&sq->stats.syncp);
3004 u64_stats_inc(&sq->stats.wake);
3005 u64_stats_update_end(&sq->stats.syncp);
3006 }
3007 netif_tx_wake_queue(txq);
3008 }
3009
3010 __netif_tx_unlock(txq);
3011 }
3012 }
3013
3014 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
3015 {
3016 struct dim_sample cur_sample = {};
3017
3018 if (!rq->packets_in_napi)
3019 return;
3020
3021 /* Don't need protection when fetching stats, since fetcher and
3022 * updater of the stats are in the same context
3023 */
3024 dim_update_sample(rq->calls,
3025 u64_stats_read(&rq->stats.packets),
3026 u64_stats_read(&rq->stats.bytes),
3027 &cur_sample);
3028
3029 net_dim(&rq->dim, &cur_sample);
3030 rq->packets_in_napi = 0;
3031 }
3032
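/* NAPI poll for a receive queue: clean the paired transmit queue
 * first, receive up to @budget packets, flush any XDP redirects, and,
 * if the budget was not exhausted, complete NAPI and feed the dynamic
 * interrupt moderation (DIM) sampler.
 */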
3033 static int virtnet_poll(struct napi_struct *napi, int budget)
3034 {
3035 struct receive_queue *rq =
3036 container_of(napi, struct receive_queue, napi);
3037 struct virtnet_info *vi = rq->vq->vdev->priv;
3038 struct send_queue *sq;
3039 unsigned int received;
3040 unsigned int xdp_xmit = 0;
3041 bool napi_complete;
3042
3043 virtnet_poll_cleantx(rq, budget);
3044
3045 received = virtnet_receive(rq, budget, &xdp_xmit);
3046 rq->packets_in_napi += received;
3047
3048 if (xdp_xmit & VIRTIO_XDP_REDIR)
3049 xdp_do_flush();
3050
3051 /* Out of packets? */
3052 if (received < budget) {
3053 napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
3054 /* Intentionally not taking dim_lock here. This may result in a
3055 * spurious net_dim call. But if that happens virtnet_rx_dim_work
3056 * will not act on the scheduled work.
3057 */
3058 if (napi_complete && rq->dim_enabled)
3059 virtnet_rx_dim_update(vi, rq);
3060 }
3061
3062 if (xdp_xmit & VIRTIO_XDP_TX) {
3063 sq = virtnet_xdp_get_sq(vi);
3064 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
3065 u64_stats_update_begin(&sq->stats.syncp);
3066 u64_stats_inc(&sq->stats.kicks);
3067 u64_stats_update_end(&sq->stats.syncp);
3068 }
3069 virtnet_xdp_put_sq(vi, sq);
3070 }
3071
3072 return received;
3073 }
3074
3075 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
3076 {
3077 virtnet_napi_tx_disable(&vi->sq[qp_index]);
3078 virtnet_napi_disable(&vi->rq[qp_index]);
3079 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
3080 }
3081
3082 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
3083 {
3084 struct net_device *dev = vi->dev;
3085 int err;
3086
3087 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
3088 vi->rq[qp_index].napi.napi_id);
3089 if (err < 0)
3090 return err;
3091
3092 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
3093 MEM_TYPE_PAGE_SHARED, NULL);
3094 if (err < 0)
3095 goto err_xdp_reg_mem_model;
3096
3097 virtnet_napi_enable(&vi->rq[qp_index]);
3098 virtnet_napi_tx_enable(&vi->sq[qp_index]);
3099
3100 return 0;
3101
3102 err_xdp_reg_mem_model:
3103 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
3104 return err;
3105 }
3106
3107 static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim)
3108 {
3109 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3110 return;
3111 net_dim_work_cancel(dim);
3112 }
3113
3114 static void virtnet_update_settings(struct virtnet_info *vi)
3115 {
3116 u32 speed;
3117 u8 duplex;
3118
3119 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3120 return;
3121
3122 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3123
3124 if (ethtool_validate_speed(speed))
3125 vi->speed = speed;
3126
3127 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3128
3129 if (ethtool_validate_duplex(duplex))
3130 vi->duplex = duplex;
3131 }
3132
3133 static int virtnet_open(struct net_device *dev)
3134 {
3135 struct virtnet_info *vi = netdev_priv(dev);
3136 int i, err;
3137
3138 enable_delayed_refill(vi);
3139
3140 for (i = 0; i < vi->max_queue_pairs; i++) {
3141 if (i < vi->curr_queue_pairs)
3142 /* Make sure we have some buffers: if oom use wq. */
3143 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
3144 schedule_delayed_work(&vi->refill, 0);
3145
3146 err = virtnet_enable_queue_pair(vi, i);
3147 if (err < 0)
3148 goto err_enable_qp;
3149 }
3150
3151 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3152 if (vi->status & VIRTIO_NET_S_LINK_UP)
3153 netif_carrier_on(vi->dev);
3154 virtio_config_driver_enable(vi->vdev);
3155 } else {
3156 vi->status = VIRTIO_NET_S_LINK_UP;
3157 netif_carrier_on(dev);
3158 }
3159
3160 return 0;
3161
3162 err_enable_qp:
3163 disable_delayed_refill(vi);
3164 cancel_delayed_work_sync(&vi->refill);
3165
3166 for (i--; i >= 0; i--) {
3167 virtnet_disable_queue_pair(vi, i);
3168 virtnet_cancel_dim(vi, &vi->rq[i].dim);
3169 }
3170
3171 return err;
3172 }
3173
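/* NAPI poll for a transmit queue: free completed transmissions (or
 * run the AF_XDP transmit path when an xsk pool is bound), wake the
 * netdev queue when enough descriptors are available again, and
 * re-enable callbacks unless more work is already pending.
 */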
3174 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
3175 {
3176 struct send_queue *sq = container_of(napi, struct send_queue, napi);
3177 struct virtnet_info *vi = sq->vq->vdev->priv;
3178 unsigned int index = vq2txq(sq->vq);
3179 struct netdev_queue *txq;
3180 int opaque, xsk_done = 0;
3181 bool done;
3182
3183 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
3184 /* We don't need to enable cb for XDP */
3185 napi_complete_done(napi, 0);
3186 return 0;
3187 }
3188
3189 txq = netdev_get_tx_queue(vi->dev, index);
3190 __netif_tx_lock(txq, raw_smp_processor_id());
3191 virtqueue_disable_cb(sq->vq);
3192
3193 if (sq->xsk_pool)
3194 xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget);
3195 else
3196 free_old_xmit(sq, txq, !!budget);
3197
3198 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
3199 if (netif_tx_queue_stopped(txq)) {
3200 u64_stats_update_begin(&sq->stats.syncp);
3201 u64_stats_inc(&sq->stats.wake);
3202 u64_stats_update_end(&sq->stats.syncp);
3203 }
3204 netif_tx_wake_queue(txq);
3205 }
3206
3207 if (xsk_done >= budget) {
3208 __netif_tx_unlock(txq);
3209 return budget;
3210 }
3211
3212 opaque = virtqueue_enable_cb_prepare(sq->vq);
3213
3214 done = napi_complete_done(napi, 0);
3215
3216 if (!done)
3217 virtqueue_disable_cb(sq->vq);
3218
3219 __netif_tx_unlock(txq);
3220
3221 if (done) {
3222 if (unlikely(virtqueue_poll(sq->vq, opaque))) {
3223 if (napi_schedule_prep(napi)) {
3224 __netif_tx_lock(txq, raw_smp_processor_id());
3225 virtqueue_disable_cb(sq->vq);
3226 __netif_tx_unlock(txq);
3227 __napi_schedule(napi);
3228 }
3229 }
3230 }
3231
3232 return 0;
3233 }
3234
3235 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
3236 {
3237 struct virtio_net_hdr_mrg_rxbuf *hdr;
3238 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
3239 struct virtnet_info *vi = sq->vq->vdev->priv;
3240 int num_sg;
3241 unsigned hdr_len = vi->hdr_len;
3242 bool can_push;
3243
3244 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
3245
3246 can_push = vi->any_header_sg &&
3247 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
3248 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
3249 /* Even if we can, don't push here yet as this would skew
3250 * csum_start offset below. */
3251 if (can_push)
3252 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
3253 else
3254 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
3255
3256 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
3257 virtio_is_little_endian(vi->vdev), false,
3258 0))
3259 return -EPROTO;
3260
3261 if (vi->mergeable_rx_bufs)
3262 hdr->num_buffers = 0;
3263
3264 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
3265 if (can_push) {
3266 __skb_push(skb, hdr_len);
3267 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
3268 if (unlikely(num_sg < 0))
3269 return num_sg;
3270 /* Pull header back to avoid skew in tx bytes calculations. */
3271 __skb_pull(skb, hdr_len);
3272 } else {
3273 sg_set_buf(sq->sg, hdr, hdr_len);
3274 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
3275 if (unlikely(num_sg < 0))
3276 return num_sg;
3277 num_sg++;
3278 }
3279
3280 return virtnet_add_outbuf(sq, num_sg, skb,
3281 orphan ? VIRTNET_XMIT_TYPE_SKB_ORPHAN : VIRTNET_XMIT_TYPE_SKB);
3282 }
3283
3284 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
3285 {
3286 struct virtnet_info *vi = netdev_priv(dev);
3287 int qnum = skb_get_queue_mapping(skb);
3288 struct send_queue *sq = &vi->sq[qnum];
3289 int err;
3290 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
3291 bool xmit_more = netdev_xmit_more();
3292 bool use_napi = sq->napi.weight;
3293 bool kick;
3294
3295 if (!use_napi)
3296 free_old_xmit(sq, txq, false);
3297 else
3298 virtqueue_disable_cb(sq->vq);
3299
3300 /* timestamp packet in software */
3301 skb_tx_timestamp(skb);
3302
3303 /* Try to transmit */
3304 err = xmit_skb(sq, skb, !use_napi);
3305
3306 /* This should not happen! */
3307 if (unlikely(err)) {
3308 DEV_STATS_INC(dev, tx_fifo_errors);
3309 if (net_ratelimit())
3310 dev_warn(&dev->dev,
3311 "Unexpected TXQ (%d) queue failure: %d\n",
3312 qnum, err);
3313 DEV_STATS_INC(dev, tx_dropped);
3314 dev_kfree_skb_any(skb);
3315 return NETDEV_TX_OK;
3316 }
3317
3318 /* Don't wait up for transmitted skbs to be freed. */
3319 if (!use_napi) {
3320 skb_orphan(skb);
3321 nf_reset_ct(skb);
3322 }
3323
3324 if (use_napi)
3325 tx_may_stop(vi, dev, sq);
3326 else
3327 check_sq_full_and_disable(vi, dev, sq);
3328
3329 kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
3330 !xmit_more || netif_xmit_stopped(txq);
3331 if (kick) {
3332 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
3333 u64_stats_update_begin(&sq->stats.syncp);
3334 u64_stats_inc(&sq->stats.kicks);
3335 u64_stats_update_end(&sq->stats.syncp);
3336 }
3337 }
3338
3339 if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
3340 virtqueue_napi_schedule(&sq->napi, sq->vq);
3341
3342 return NETDEV_TX_OK;
3343 }
3344
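/* Quiesce one receive queue: when the interface is running, stop its
 * NAPI instance and cancel any pending DIM work so the underlying
 * virtqueue can be safely resized or reset by the callers below.
 */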
3345 static void __virtnet_rx_pause(struct virtnet_info *vi,
3346 struct receive_queue *rq)
3347 {
3348 bool running = netif_running(vi->dev);
3349
3350 if (running) {
3351 virtnet_napi_disable(rq);
3352 virtnet_cancel_dim(vi, &rq->dim);
3353 }
3354 }
3355
3356 static void virtnet_rx_pause_all(struct virtnet_info *vi)
3357 {
3358 int i;
3359
3360 /*
3361 * Make sure refill_work does not run concurrently to
3362 * avoid napi_disable race which leads to deadlock.
3363 */
3364 disable_delayed_refill(vi);
3365 cancel_delayed_work_sync(&vi->refill);
3366 for (i = 0; i < vi->max_queue_pairs; i++)
3367 __virtnet_rx_pause(vi, &vi->rq[i]);
3368 }
3369
3370 static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
3371 {
3372 /*
3373 * Make sure refill_work does not run concurrently to
3374 * avoid napi_disable race which leads to deadlock.
3375 */
3376 disable_delayed_refill(vi);
3377 cancel_delayed_work_sync(&vi->refill);
3378 __virtnet_rx_pause(vi, rq);
3379 }
3380
3381 static void __virtnet_rx_resume(struct virtnet_info *vi,
3382 struct receive_queue *rq,
3383 bool refill)
3384 {
3385 bool running = netif_running(vi->dev);
3386 bool schedule_refill = false;
3387
3388 if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
3389 schedule_refill = true;
3390 if (running)
3391 virtnet_napi_enable(rq);
3392
3393 if (schedule_refill)
3394 schedule_delayed_work(&vi->refill, 0);
3395 }
3396
3397 static void virtnet_rx_resume_all(struct virtnet_info *vi)
3398 {
3399 int i;
3400
3401 enable_delayed_refill(vi);
3402 for (i = 0; i < vi->max_queue_pairs; i++) {
3403 if (i < vi->curr_queue_pairs)
3404 __virtnet_rx_resume(vi, &vi->rq[i], true);
3405 else
3406 __virtnet_rx_resume(vi, &vi->rq[i], false);
3407 }
3408 }
3409
3410 static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
3411 {
3412 enable_delayed_refill(vi);
3413 __virtnet_rx_resume(vi, rq, true);
3414 }
3415
3416 static int virtnet_rx_resize(struct virtnet_info *vi,
3417 struct receive_queue *rq, u32 ring_num)
3418 {
3419 int err, qindex;
3420
3421 qindex = rq - vi->rq;
3422
3423 virtnet_rx_pause(vi, rq);
3424
3425 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL);
3426 if (err)
3427 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
3428
3429 virtnet_rx_resume(vi, rq);
3430 return err;
3431 }
3432
3433 static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
3434 {
3435 bool running = netif_running(vi->dev);
3436 struct netdev_queue *txq;
3437 int qindex;
3438
3439 qindex = sq - vi->sq;
3440
3441 if (running)
3442 virtnet_napi_tx_disable(sq);
3443
3444 txq = netdev_get_tx_queue(vi->dev, qindex);
3445
3446 /* 1. wait for all xmit to complete
3447 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
3448 */
3449 __netif_tx_lock_bh(txq);
3450
3451 /* Prevent rx poll from accessing sq. */
3452 sq->reset = true;
3453
3454 /* Prevent the upper layer from trying to send packets. */
3455 netif_stop_subqueue(vi->dev, qindex);
3456
3457 __netif_tx_unlock_bh(txq);
3458 }
3459
3460 static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
3461 {
3462 bool running = netif_running(vi->dev);
3463 struct netdev_queue *txq;
3464 int qindex;
3465
3466 qindex = sq - vi->sq;
3467
3468 txq = netdev_get_tx_queue(vi->dev, qindex);
3469
3470 __netif_tx_lock_bh(txq);
3471 sq->reset = false;
3472 netif_tx_wake_queue(txq);
3473 __netif_tx_unlock_bh(txq);
3474
3475 if (running)
3476 virtnet_napi_tx_enable(sq);
3477 }
3478
3479 static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
3480 u32 ring_num)
3481 {
3482 int qindex, err;
3483
3484 qindex = sq - vi->sq;
3485
3486 virtnet_tx_pause(vi, sq);
3487
3488 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf,
3489 virtnet_sq_free_unused_buf_done);
3490 if (err)
3491 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
3492
3493 virtnet_tx_resume(vi, sq);
3494
3495 return err;
3496 }
3497
3498 /*
3499 * Send command via the control virtqueue and check status. Commands
3500 * supported by the hypervisor, as indicated by feature bits, should
3501 * never fail unless improperly formatted.
3502 */
3503 static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd,
3504 struct scatterlist *out,
3505 struct scatterlist *in)
3506 {
3507 struct scatterlist *sgs[5], hdr, stat;
3508 u32 out_num = 0, tmp, in_num = 0;
3509 bool ok;
3510 int ret;
3511
3512 /* Caller should know better */
3513 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
3514
3515 mutex_lock(&vi->cvq_lock);
3516 vi->ctrl->status = ~0;
3517 vi->ctrl->hdr.class = class;
3518 vi->ctrl->hdr.cmd = cmd;
3519 /* Add header */
3520 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
3521 sgs[out_num++] = &hdr;
3522
3523 if (out)
3524 sgs[out_num++] = out;
3525
3526 /* Add return status. */
3527 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
3528 sgs[out_num + in_num++] = &stat;
3529
3530 if (in)
3531 sgs[out_num + in_num++] = in;
3532
3533 BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
3534 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
3535 if (ret < 0) {
3536 dev_warn(&vi->vdev->dev,
3537 "Failed to add sgs for command vq: %d\n.", ret);
3538 mutex_unlock(&vi->cvq_lock);
3539 return false;
3540 }
3541
3542 if (unlikely(!virtqueue_kick(vi->cvq)))
3543 goto unlock;
3544
3545 /* Spin for a response, the kick causes an ioport write, trapping
3546 * into the hypervisor, so the request should be handled immediately.
3547 */
3548 while (!virtqueue_get_buf(vi->cvq, &tmp) &&
3549 !virtqueue_is_broken(vi->cvq)) {
3550 cond_resched();
3551 cpu_relax();
3552 }
3553
3554 unlock:
3555 ok = vi->ctrl->status == VIRTIO_NET_OK;
3556 mutex_unlock(&vi->cvq_lock);
3557 return ok;
3558 }
3559
3560 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
3561 struct scatterlist *out)
3562 {
3563 return virtnet_send_command_reply(vi, class, cmd, out, NULL);
3564 }
3565
3566 static int virtnet_set_mac_address(struct net_device *dev, void *p)
3567 {
3568 struct virtnet_info *vi = netdev_priv(dev);
3569 struct virtio_device *vdev = vi->vdev;
3570 int ret;
3571 struct sockaddr *addr;
3572 struct scatterlist sg;
3573
3574 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
3575 return -EOPNOTSUPP;
3576
3577 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
3578 if (!addr)
3579 return -ENOMEM;
3580
3581 ret = eth_prepare_mac_addr_change(dev, addr);
3582 if (ret)
3583 goto out;
3584
3585 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
3586 sg_init_one(&sg, addr->sa_data, dev->addr_len);
3587 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
3588 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
3589 dev_warn(&vdev->dev,
3590 "Failed to set mac address by vq command.\n");
3591 ret = -EINVAL;
3592 goto out;
3593 }
3594 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
3595 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3596 unsigned int i;
3597
3598 /* Naturally, this has an atomicity problem. */
3599 for (i = 0; i < dev->addr_len; i++)
3600 virtio_cwrite8(vdev,
3601 offsetof(struct virtio_net_config, mac) +
3602 i, addr->sa_data[i]);
3603 }
3604
3605 eth_commit_mac_addr_change(dev, p);
3606 ret = 0;
3607
3608 out:
3609 kfree(addr);
3610 return ret;
3611 }
3612
3613 static void virtnet_stats(struct net_device *dev,
3614 struct rtnl_link_stats64 *tot)
3615 {
3616 struct virtnet_info *vi = netdev_priv(dev);
3617 unsigned int start;
3618 int i;
3619
3620 for (i = 0; i < vi->max_queue_pairs; i++) {
3621 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
3622 struct receive_queue *rq = &vi->rq[i];
3623 struct send_queue *sq = &vi->sq[i];
3624
3625 do {
3626 start = u64_stats_fetch_begin(&sq->stats.syncp);
3627 tpackets = u64_stats_read(&sq->stats.packets);
3628 tbytes = u64_stats_read(&sq->stats.bytes);
3629 terrors = u64_stats_read(&sq->stats.tx_timeouts);
3630 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3631
3632 do {
3633 start = u64_stats_fetch_begin(&rq->stats.syncp);
3634 rpackets = u64_stats_read(&rq->stats.packets);
3635 rbytes = u64_stats_read(&rq->stats.bytes);
3636 rdrops = u64_stats_read(&rq->stats.drops);
3637 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3638
3639 tot->rx_packets += rpackets;
3640 tot->tx_packets += tpackets;
3641 tot->rx_bytes += rbytes;
3642 tot->tx_bytes += tbytes;
3643 tot->rx_dropped += rdrops;
3644 tot->tx_errors += terrors;
3645 }
3646
3647 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
3648 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
3649 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
3650 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
3651 }
3652
3653 static void virtnet_ack_link_announce(struct virtnet_info *vi)
3654 {
3655 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
3656 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
3657 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
3658 }
3659
3660 static bool virtnet_commit_rss_command(struct virtnet_info *vi);
3661
3662 static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs)
3663 {
3664 u32 indir_val = 0;
3665 int i = 0;
3666
3667 for (; i < vi->rss_indir_table_size; ++i) {
3668 indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
3669 vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val);
3670 }
3671 vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs);
3672 }
3673
3674 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
3675 {
3676 struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
3677 struct virtio_net_rss_config_hdr *old_rss_hdr;
3678 struct virtio_net_rss_config_trailer old_rss_trailer;
3679 struct net_device *dev = vi->dev;
3680 struct scatterlist sg;
3681
3682 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
3683 return 0;
3684
3685 /* First check whether the RSS configuration needs updating. Update it only if
3686 * (1) RSS is enabled and (2) the user has not configured the indirection table.
3687 *
3688 * During RSS command processing, the device updates queue_pairs from rss.max_tx_vq. That is,
3689 * the device updates queue_pairs together with RSS, so we can skip the separate queue_pairs
3690 * update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
3691 */
3692 if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
3693 old_rss_hdr = vi->rss_hdr;
3694 old_rss_trailer = vi->rss_trailer;
3695 vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
3696 if (!vi->rss_hdr) {
3697 vi->rss_hdr = old_rss_hdr;
3698 return -ENOMEM;
3699 }
3700
3701 *vi->rss_hdr = *old_rss_hdr;
3702 virtnet_rss_update_by_qpairs(vi, queue_pairs);
3703
3704 if (!virtnet_commit_rss_command(vi)) {
3705 /* restore ctrl_rss if commit_rss_command failed */
3706 devm_kfree(&dev->dev, vi->rss_hdr);
3707 vi->rss_hdr = old_rss_hdr;
3708 vi->rss_trailer = old_rss_trailer;
3709
3710 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n",
3711 queue_pairs);
3712 return -EINVAL;
3713 }
3714 devm_kfree(&dev->dev, old_rss_hdr);
3715 goto succ;
3716 }
3717
3718 mq = kzalloc(sizeof(*mq), GFP_KERNEL);
3719 if (!mq)
3720 return -ENOMEM;
3721
3722 mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
3723 sg_init_one(&sg, mq, sizeof(*mq));
3724
3725 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3726 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
3727 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
3728 queue_pairs);
3729 return -EINVAL;
3730 }
3731 succ:
3732 vi->curr_queue_pairs = queue_pairs;
3733 /* virtnet_open() will refill when the device goes up. */
3734 spin_lock_bh(&vi->refill_lock);
3735 if (dev->flags & IFF_UP && vi->refill_enabled)
3736 schedule_delayed_work(&vi->refill, 0);
3737 spin_unlock_bh(&vi->refill_lock);
3738
3739 return 0;
3740 }
3741
3742 static int virtnet_close(struct net_device *dev)
3743 {
3744 struct virtnet_info *vi = netdev_priv(dev);
3745 int i;
3746
3747 /* Make sure NAPI doesn't schedule refill work */
3748 disable_delayed_refill(vi);
3749 /* Make sure refill_work doesn't re-enable napi! */
3750 cancel_delayed_work_sync(&vi->refill);
3751 /* Prevent the config change callback from changing carrier
3752 * after close
3753 */
3754 virtio_config_driver_disable(vi->vdev);
3755 /* Stop getting status/speed updates: we don't care until next
3756 * open
3757 */
3758 cancel_work_sync(&vi->config_work);
3759
3760 for (i = 0; i < vi->max_queue_pairs; i++) {
3761 virtnet_disable_queue_pair(vi, i);
3762 virtnet_cancel_dim(vi, &vi->rq[i].dim);
3763 }
3764
3765 netif_carrier_off(dev);
3766
3767 return 0;
3768 }
3769
3770 static void virtnet_rx_mode_work(struct work_struct *work)
3771 {
3772 struct virtnet_info *vi =
3773 container_of(work, struct virtnet_info, rx_mode_work);
3774 u8 *promisc_allmulti __free(kfree) = NULL;
3775 struct net_device *dev = vi->dev;
3776 struct scatterlist sg[2];
3777 struct virtio_net_ctrl_mac *mac_data;
3778 struct netdev_hw_addr *ha;
3779 int uc_count;
3780 int mc_count;
3781 void *buf;
3782 int i;
3783
3784 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
3785 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
3786 return;
3787
3788 promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_KERNEL);
3789 if (!promisc_allmulti) {
3790 dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
3791 return;
3792 }
3793
3794 rtnl_lock();
3795
3796 *promisc_allmulti = !!(dev->flags & IFF_PROMISC);
3797 sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
3798
3799 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
3800 VIRTIO_NET_CTRL_RX_PROMISC, sg))
3801 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
3802 *promisc_allmulti ? "en" : "dis");
3803
3804 *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
3805 sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
3806
3807 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
3808 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
3809 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
3810 *promisc_allmulti ? "en" : "dis");
3811
3812 netif_addr_lock_bh(dev);
3813
3814 uc_count = netdev_uc_count(dev);
3815 mc_count = netdev_mc_count(dev);
3816 /* MAC filter - use one buffer for both lists */
3817 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
3818 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
3819 mac_data = buf;
3820 if (!buf) {
3821 netif_addr_unlock_bh(dev);
3822 rtnl_unlock();
3823 return;
3824 }
3825
3826 sg_init_table(sg, 2);
3827
3828 /* Store the unicast list and count in the front of the buffer */
3829 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
3830 i = 0;
3831 netdev_for_each_uc_addr(ha, dev)
3832 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
3833
3834 sg_set_buf(&sg[0], mac_data,
3835 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
3836
3837 /* multicast list and count fill the end */
3838 mac_data = (void *)&mac_data->macs[uc_count][0];
3839
3840 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
3841 i = 0;
3842 netdev_for_each_mc_addr(ha, dev)
3843 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
3844
3845 netif_addr_unlock_bh(dev);
3846
3847 sg_set_buf(&sg[1], mac_data,
3848 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
3849
3850 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
3851 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
3852 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
3853
3854 rtnl_unlock();
3855
3856 kfree(buf);
3857 }
3858
3859 static void virtnet_set_rx_mode(struct net_device *dev)
3860 {
3861 struct virtnet_info *vi = netdev_priv(dev);
3862
3863 if (vi->rx_mode_work_enabled)
3864 schedule_work(&vi->rx_mode_work);
3865 }
3866
3867 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
3868 __be16 proto, u16 vid)
3869 {
3870 struct virtnet_info *vi = netdev_priv(dev);
3871 __virtio16 *_vid __free(kfree) = NULL;
3872 struct scatterlist sg;
3873
3874 _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
3875 if (!_vid)
3876 return -ENOMEM;
3877
3878 *_vid = cpu_to_virtio16(vi->vdev, vid);
3879 sg_init_one(&sg, _vid, sizeof(*_vid));
3880
3881 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3882 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
3883 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
3884 return 0;
3885 }
3886
3887 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
3888 __be16 proto, u16 vid)
3889 {
3890 struct virtnet_info *vi = netdev_priv(dev);
3891 __virtio16 *_vid __free(kfree) = NULL;
3892 struct scatterlist sg;
3893
3894 _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
3895 if (!_vid)
3896 return -ENOMEM;
3897
3898 *_vid = cpu_to_virtio16(vi->vdev, vid);
3899 sg_init_one(&sg, _vid, sizeof(*_vid));
3900
3901 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3902 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
3903 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
3904 return 0;
3905 }
3906
3907 static void virtnet_clean_affinity(struct virtnet_info *vi)
3908 {
3909 int i;
3910
3911 if (vi->affinity_hint_set) {
3912 for (i = 0; i < vi->max_queue_pairs; i++) {
3913 virtqueue_set_affinity(vi->rq[i].vq, NULL);
3914 virtqueue_set_affinity(vi->sq[i].vq, NULL);
3915 }
3916
3917 vi->affinity_hint_set = false;
3918 }
3919 }
3920
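/* Spread virtqueue interrupt affinity across the online CPUs: each queue
 * pair gets a contiguous group of roughly num_online_cpus()/curr_queue_pairs
 * CPUs, with any remainder ("stragglers") handed out one extra CPU per early
 * queue. The same mask is also used for the queue's XPS mapping.
 */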
3921 static void virtnet_set_affinity(struct virtnet_info *vi)
3922 {
3923 cpumask_var_t mask;
3924 int stragglers;
3925 int group_size;
3926 int i, start = 0, cpu;
3927 int num_cpu;
3928 int stride;
3929
3930 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3931 virtnet_clean_affinity(vi);
3932 return;
3933 }
3934
3935 num_cpu = num_online_cpus();
3936 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
3937 stragglers = num_cpu >= vi->curr_queue_pairs ?
3938 num_cpu % vi->curr_queue_pairs :
3939 0;
3940
3941 for (i = 0; i < vi->curr_queue_pairs; i++) {
3942 group_size = stride + (i < stragglers ? 1 : 0);
3943
3944 for_each_online_cpu_wrap(cpu, start) {
3945 if (!group_size--) {
3946 start = cpu;
3947 break;
3948 }
3949 cpumask_set_cpu(cpu, mask);
3950 }
3951
3952 virtqueue_set_affinity(vi->rq[i].vq, mask);
3953 virtqueue_set_affinity(vi->sq[i].vq, mask);
3954 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
3955 cpumask_clear(mask);
3956 }
3957
3958 vi->affinity_hint_set = true;
3959 free_cpumask_var(mask);
3960 }
3961
3962 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
3963 {
3964 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3965 node);
3966 virtnet_set_affinity(vi);
3967 return 0;
3968 }
3969
3970 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
3971 {
3972 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3973 node_dead);
3974 virtnet_set_affinity(vi);
3975 return 0;
3976 }
3977
3978 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
3979 {
3980 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3981 node);
3982
3983 virtnet_clean_affinity(vi);
3984 return 0;
3985 }
3986
3987 static enum cpuhp_state virtionet_online;
3988
3989 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
3990 {
3991 int ret;
3992
3993 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
3994 if (ret)
3995 return ret;
3996 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
3997 &vi->node_dead);
3998 if (!ret)
3999 return ret;
4000 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
4001 return ret;
4002 }
4003
4004 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
4005 {
4006 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
4007 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
4008 &vi->node_dead);
4009 }
4010
4011 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
4012 u16 vqn, u32 max_usecs, u32 max_packets)
4013 {
4014 struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
4015 struct scatterlist sgs;
4016
4017 coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
4018 if (!coal_vq)
4019 return -ENOMEM;
4020
4021 coal_vq->vqn = cpu_to_le16(vqn);
4022 coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
4023 coal_vq->coal.max_packets = cpu_to_le32(max_packets);
4024 sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
4025
4026 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
4027 VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
4028 &sgs))
4029 return -EINVAL;
4030
4031 return 0;
4032 }
4033
4034 static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
4035 u16 queue, u32 max_usecs,
4036 u32 max_packets)
4037 {
4038 int err;
4039
4040 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
4041 return -EOPNOTSUPP;
4042
4043 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
4044 max_usecs, max_packets);
4045 if (err)
4046 return err;
4047
4048 vi->rq[queue].intr_coal.max_usecs = max_usecs;
4049 vi->rq[queue].intr_coal.max_packets = max_packets;
4050
4051 return 0;
4052 }
4053
4054 static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
4055 u16 queue, u32 max_usecs,
4056 u32 max_packets)
4057 {
4058 int err;
4059
4060 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
4061 return -EOPNOTSUPP;
4062
4063 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
4064 max_usecs, max_packets);
4065 if (err)
4066 return err;
4067
4068 vi->sq[queue].intr_coal.max_usecs = max_usecs;
4069 vi->sq[queue].intr_coal.max_packets = max_packets;
4070
4071 return 0;
4072 }
4073
4074 static void virtnet_get_ringparam(struct net_device *dev,
4075 struct ethtool_ringparam *ring,
4076 struct kernel_ethtool_ringparam *kernel_ring,
4077 struct netlink_ext_ack *extack)
4078 {
4079 struct virtnet_info *vi = netdev_priv(dev);
4080
4081 ring->rx_max_pending = vi->rq[0].vq->num_max;
4082 ring->tx_max_pending = vi->sq[0].vq->num_max;
4083 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
4084 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
4085 }
4086
4087 static int virtnet_set_ringparam(struct net_device *dev,
4088 struct ethtool_ringparam *ring,
4089 struct kernel_ethtool_ringparam *kernel_ring,
4090 struct netlink_ext_ack *extack)
4091 {
4092 struct virtnet_info *vi = netdev_priv(dev);
4093 u32 rx_pending, tx_pending;
4094 struct receive_queue *rq;
4095 struct send_queue *sq;
4096 int i, err;
4097
4098 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
4099 return -EINVAL;
4100
4101 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
4102 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
4103
4104 if (ring->rx_pending == rx_pending &&
4105 ring->tx_pending == tx_pending)
4106 return 0;
4107
4108 if (ring->rx_pending > vi->rq[0].vq->num_max)
4109 return -EINVAL;
4110
4111 if (ring->tx_pending > vi->sq[0].vq->num_max)
4112 return -EINVAL;
4113
4114 for (i = 0; i < vi->max_queue_pairs; i++) {
4115 rq = vi->rq + i;
4116 sq = vi->sq + i;
4117
4118 if (ring->tx_pending != tx_pending) {
4119 err = virtnet_tx_resize(vi, sq, ring->tx_pending);
4120 if (err)
4121 return err;
4122
4123 /* Upon disabling and re-enabling a transmit virtqueue, the device must
4124 * set the coalescing parameters of the virtqueue to those configured
4125 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
4126 * did not set any TX coalescing parameters, to 0.
4127 */
4128 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
4129 vi->intr_coal_tx.max_usecs,
4130 vi->intr_coal_tx.max_packets);
4131
4132 /* Don't break the tx resize action if the vq coalescing is not
4133 * supported. The same is true for rx resize below.
4134 */
4135 if (err && err != -EOPNOTSUPP)
4136 return err;
4137 }
4138
4139 if (ring->rx_pending != rx_pending) {
4140 err = virtnet_rx_resize(vi, rq, ring->rx_pending);
4141 if (err)
4142 return err;
4143
4144 /* The reason is the same as for the transmit virtqueue reset */
4145 mutex_lock(&vi->rq[i].dim_lock);
4146 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
4147 vi->intr_coal_rx.max_usecs,
4148 vi->intr_coal_rx.max_packets);
4149 mutex_unlock(&vi->rq[i].dim_lock);
4150 if (err && err != -EOPNOTSUPP)
4151 return err;
4152 }
4153 }
4154
4155 return 0;
4156 }
4157
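/* Push the current RSS/hash configuration to the device: the header
 * (including the indirection table) and the trailer (max_tx_vq plus the
 * hash key) are passed as two scatterlist entries of one control command.
 */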
4158 static bool virtnet_commit_rss_command(struct virtnet_info *vi)
4159 {
4160 struct net_device *dev = vi->dev;
4161 struct scatterlist sgs[2];
4162
4163 /* prepare sgs */
4164 sg_init_table(sgs, 2);
4165 sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi));
4166 sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi));
4167
4168 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
4169 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
4170 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
4171 goto err;
4172
4173 return true;
4174
4175 err:
4176 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
4177 return false;
4178
4179 }
4180
4181 static void virtnet_init_default_rss(struct virtnet_info *vi)
4182 {
4183 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported);
4184 vi->rss_hash_types_saved = vi->rss_hash_types_supported;
4185 vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size
4186 ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0;
4187 vi->rss_hdr->unclassified_queue = 0;
4188
4189 virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
4190
4191 vi->rss_trailer.hash_key_length = vi->rss_key_size;
4192
4193 netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size);
4194 }
4195
4196 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
4197 {
4198 info->data = 0;
4199 switch (info->flow_type) {
4200 case TCP_V4_FLOW:
4201 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
4202 info->data = RXH_IP_SRC | RXH_IP_DST |
4203 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4204 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
4205 info->data = RXH_IP_SRC | RXH_IP_DST;
4206 }
4207 break;
4208 case TCP_V6_FLOW:
4209 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
4210 info->data = RXH_IP_SRC | RXH_IP_DST |
4211 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4212 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
4213 info->data = RXH_IP_SRC | RXH_IP_DST;
4214 }
4215 break;
4216 case UDP_V4_FLOW:
4217 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
4218 info->data = RXH_IP_SRC | RXH_IP_DST |
4219 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4220 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
4221 info->data = RXH_IP_SRC | RXH_IP_DST;
4222 }
4223 break;
4224 case UDP_V6_FLOW:
4225 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
4226 info->data = RXH_IP_SRC | RXH_IP_DST |
4227 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4228 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
4229 info->data = RXH_IP_SRC | RXH_IP_DST;
4230 }
4231 break;
4232 case IPV4_FLOW:
4233 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
4234 info->data = RXH_IP_SRC | RXH_IP_DST;
4235
4236 break;
4237 case IPV6_FLOW:
4238 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
4239 info->data = RXH_IP_SRC | RXH_IP_DST;
4240
4241 break;
4242 default:
4243 info->data = 0;
4244 break;
4245 }
4246 }
4247
4248 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
4249 {
4250 u32 new_hashtypes = vi->rss_hash_types_saved;
4251 bool is_disable = info->data & RXH_DISCARD;
4252 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
4253
4254 /* supports only 'sd', 'sdfn' and 'r' */
4255 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
4256 return false;
4257
4258 switch (info->flow_type) {
4259 case TCP_V4_FLOW:
4260 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
4261 if (!is_disable)
4262 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
4263 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
4264 break;
4265 case UDP_V4_FLOW:
4266 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
4267 if (!is_disable)
4268 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
4269 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
4270 break;
4271 case IPV4_FLOW:
4272 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
4273 if (!is_disable)
4274 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
4275 break;
4276 case TCP_V6_FLOW:
4277 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
4278 if (!is_disable)
4279 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
4280 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
4281 break;
4282 case UDP_V6_FLOW:
4283 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
4284 if (!is_disable)
4285 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
4286 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
4287 break;
4288 case IPV6_FLOW:
4289 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
4290 if (!is_disable)
4291 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
4292 break;
4293 default:
4294 /* unsupported flow */
4295 return false;
4296 }
4297
4298 /* if unsupported hashtype was set */
4299 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
4300 return false;
4301
4302 if (new_hashtypes != vi->rss_hash_types_saved) {
4303 vi->rss_hash_types_saved = new_hashtypes;
4304 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
4305 if (vi->dev->features & NETIF_F_RXHASH)
4306 return virtnet_commit_rss_command(vi);
4307 }
4308
4309 return true;
4310 }
4311
4312 static void virtnet_get_drvinfo(struct net_device *dev,
4313 struct ethtool_drvinfo *info)
4314 {
4315 struct virtnet_info *vi = netdev_priv(dev);
4316 struct virtio_device *vdev = vi->vdev;
4317
4318 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
4319 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
4320 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
4321
4322 }
4323
4324 /* TODO: Eliminate OOO packets during switching */
4325 static int virtnet_set_channels(struct net_device *dev,
4326 struct ethtool_channels *channels)
4327 {
4328 struct virtnet_info *vi = netdev_priv(dev);
4329 u16 queue_pairs = channels->combined_count;
4330 int err;
4331
4332 /* We don't support separate rx/tx channels.
4333 * We don't allow setting 'other' channels.
4334 */
4335 if (channels->rx_count || channels->tx_count || channels->other_count)
4336 return -EINVAL;
4337
4338 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
4339 return -EINVAL;
4340
4341 /* For now we don't support modifying channels while XDP is loaded.
4342 * Also, when XDP is loaded all RX queues have XDP programs, so we only
4343 * need to check a single RX queue.
4344 */
4345 if (vi->rq[0].xdp_prog)
4346 return -EINVAL;
4347
4348 cpus_read_lock();
4349 err = virtnet_set_queues(vi, queue_pairs);
4350 if (err) {
4351 cpus_read_unlock();
4352 goto err;
4353 }
4354 virtnet_set_affinity(vi);
4355 cpus_read_unlock();
4356
4357 netif_set_real_num_tx_queues(dev, queue_pairs);
4358 netif_set_real_num_rx_queues(dev, queue_pairs);
4359 err:
4360 return err;
4361 }
4362
4363 static void virtnet_stats_sprintf(u8 **p, const char *fmt, const char *noq_fmt,
4364 int num, int qid, const struct virtnet_stat_desc *desc)
4365 {
4366 int i;
4367
4368 if (qid < 0) {
4369 for (i = 0; i < num; ++i)
4370 ethtool_sprintf(p, noq_fmt, desc[i].desc);
4371 } else {
4372 for (i = 0; i < num; ++i)
4373 ethtool_sprintf(p, fmt, qid, desc[i].desc);
4374 }
4375 }
4376
4377 /* qid == -1: for rx/tx queue total field */
4378 static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
4379 {
4380 const struct virtnet_stat_desc *desc;
4381 const char *fmt, *noq_fmt;
4382 u8 *p = *data;
4383 u32 num;
4384
4385 if (type == VIRTNET_Q_TYPE_CQ && qid >= 0) {
4386 noq_fmt = "cq_hw_%s";
4387
4388 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
4389 desc = &virtnet_stats_cvq_desc[0];
4390 num = ARRAY_SIZE(virtnet_stats_cvq_desc);
4391
4392 virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc);
4393 }
4394 }
4395
4396 if (type == VIRTNET_Q_TYPE_RX) {
4397 fmt = "rx%u_%s";
4398 noq_fmt = "rx_%s";
4399
4400 desc = &virtnet_rq_stats_desc[0];
4401 num = ARRAY_SIZE(virtnet_rq_stats_desc);
4402
4403 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4404
4405 fmt = "rx%u_hw_%s";
4406 noq_fmt = "rx_hw_%s";
4407
4408 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4409 desc = &virtnet_stats_rx_basic_desc[0];
4410 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4411
4412 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4413 }
4414
4415 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4416 desc = &virtnet_stats_rx_csum_desc[0];
4417 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4418
4419 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4420 }
4421
4422 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4423 desc = &virtnet_stats_rx_speed_desc[0];
4424 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4425
4426 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4427 }
4428 }
4429
4430 if (type == VIRTNET_Q_TYPE_TX) {
4431 fmt = "tx%u_%s";
4432 noq_fmt = "tx_%s";
4433
4434 desc = &virtnet_sq_stats_desc[0];
4435 num = ARRAY_SIZE(virtnet_sq_stats_desc);
4436
4437 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4438
4439 fmt = "tx%u_hw_%s";
4440 noq_fmt = "tx_hw_%s";
4441
4442 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4443 desc = &virtnet_stats_tx_basic_desc[0];
4444 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4445
4446 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4447 }
4448
4449 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4450 desc = &virtnet_stats_tx_gso_desc[0];
4451 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4452
4453 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4454 }
4455
4456 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4457 desc = &virtnet_stats_tx_speed_desc[0];
4458 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4459
4460 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4461 }
4462 }
4463
4464 *data = p;
4465 }
4466
4467 struct virtnet_stats_ctx {
4468 /* The stats are written to qstats or to ethtool -S */
4469 bool to_qstat;
4470
4471 /* Used to calculate the offset inside the output buffer. */
4472 u32 desc_num[3];
4473
4474 /* The actual supported stat types. */
4475 u64 bitmap[3];
4476
4477 /* Used to calculate the reply buffer size. */
4478 u32 size[3];
4479
4480 /* Record the output buffer. */
4481 u64 *data;
4482 };
4483
4484 static void virtnet_stats_ctx_init(struct virtnet_info *vi,
4485 struct virtnet_stats_ctx *ctx,
4486 u64 *data, bool to_qstat)
4487 {
4488 u32 queue_type;
4489
4490 ctx->data = data;
4491 ctx->to_qstat = to_qstat;
4492
4493 if (to_qstat) {
4494 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
4495 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
4496
4497 queue_type = VIRTNET_Q_TYPE_RX;
4498
4499 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4500 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
4501 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
4502 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic);
4503 }
4504
4505 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4506 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
4507 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
4508 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum);
4509 }
4510
4511 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
4512 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO;
4513 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
4514 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso);
4515 }
4516
4517 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4518 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
4519 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
4520 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed);
4521 }
4522
4523 queue_type = VIRTNET_Q_TYPE_TX;
4524
4525 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4526 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
4527 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
4528 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic);
4529 }
4530
4531 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
4532 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM;
4533 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
4534 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum);
4535 }
4536
4537 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4538 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO;
4539 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
4540 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso);
4541 }
4542
4543 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4544 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
4545 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
4546 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed);
4547 }
4548
4549 return;
4550 }
4551
4552 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc);
4553 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc);
4554
4555 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
4556 queue_type = VIRTNET_Q_TYPE_CQ;
4557
4558 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ;
4559 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc);
4560 ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq);
4561 }
4562
4563 queue_type = VIRTNET_Q_TYPE_RX;
4564
4565 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4566 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
4567 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4568 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic);
4569 }
4570
4571 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4572 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
4573 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4574 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum);
4575 }
4576
4577 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4578 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
4579 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4580 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed);
4581 }
4582
4583 queue_type = VIRTNET_Q_TYPE_TX;
4584
4585 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4586 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
4587 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4588 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic);
4589 }
4590
4591 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4592 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO;
4593 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4594 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso);
4595 }
4596
4597 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4598 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
4599 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4600 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed);
4601 }
4602 }
4603
4604 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
4605 * @sum: the position to store the sum values
4606 * @num: field num
4607 * @q_value: the first queue fields
4608 * @q_num: number of the queues
4609 */
4610 static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num)
4611 {
4612 u32 step = num;
4613 int i, j;
4614 u64 *p;
4615
4616 for (i = 0; i < num; ++i) {
4617 p = sum + i;
4618 *p = 0;
4619
4620 for (j = 0; j < q_num; ++j)
4621 *p += *(q_value + i + j * step);
4622 }
4623 }
4624
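/* Fill the leading rx_/tx_ total fields of the ethtool output by summing
 * the corresponding per-queue columns that follow them in the buffer
 * (layout: rx totals, tx totals, cq stats, per-queue rx, per-queue tx).
 */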
4625 static void virtnet_fill_total_fields(struct virtnet_info *vi,
4626 struct virtnet_stats_ctx *ctx)
4627 {
4628 u64 *data, *first_rx_q, *first_tx_q;
4629 u32 num_cq, num_rx, num_tx;
4630
4631 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
4632 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
4633 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
4634
4635 first_rx_q = ctx->data + num_rx + num_tx + num_cq;
4636 first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx;
4637
4638 data = ctx->data;
4639
4640 stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs);
4641
4642 data = ctx->data + num_rx;
4643
4644 stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs);
4645 }
4646
4647 static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid,
4648 struct virtnet_stats_ctx *ctx,
4649 const u8 *base, bool drv_stats, u8 reply_type)
4650 {
4651 const struct virtnet_stat_desc *desc;
4652 const u64_stats_t *v_stat;
4653 u64 offset, bitmap;
4654 const __le64 *v;
4655 u32 queue_type;
4656 int i, num;
4657
4658 queue_type = vq_type(vi, qid);
4659 bitmap = ctx->bitmap[queue_type];
4660
4661 if (drv_stats) {
4662 if (queue_type == VIRTNET_Q_TYPE_RX) {
4663 desc = &virtnet_rq_stats_desc_qstat[0];
4664 num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
4665 } else {
4666 desc = &virtnet_sq_stats_desc_qstat[0];
4667 num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
4668 }
4669
4670 for (i = 0; i < num; ++i) {
4671 offset = desc[i].qstat_offset / sizeof(*ctx->data);
4672 v_stat = (const u64_stats_t *)(base + desc[i].offset);
4673 ctx->data[offset] = u64_stats_read(v_stat);
4674 }
4675 return;
4676 }
4677
4678 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4679 desc = &virtnet_stats_rx_basic_desc_qstat[0];
4680 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
4681 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
4682 goto found;
4683 }
4684
4685 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4686 desc = &virtnet_stats_rx_csum_desc_qstat[0];
4687 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
4688 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
4689 goto found;
4690 }
4691
4692 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
4693 desc = &virtnet_stats_rx_gso_desc_qstat[0];
4694 num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
4695 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO)
4696 goto found;
4697 }
4698
4699 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4700 desc = &virtnet_stats_rx_speed_desc_qstat[0];
4701 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
4702 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
4703 goto found;
4704 }
4705
4706 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4707 desc = &virtnet_stats_tx_basic_desc_qstat[0];
4708 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
4709 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
4710 goto found;
4711 }
4712
4713 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
4714 desc = &virtnet_stats_tx_csum_desc_qstat[0];
4715 num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
4716 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM)
4717 goto found;
4718 }
4719
4720 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4721 desc = &virtnet_stats_tx_gso_desc_qstat[0];
4722 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
4723 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
4724 goto found;
4725 }
4726
4727 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4728 desc = &virtnet_stats_tx_speed_desc_qstat[0];
4729 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
4730 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
4731 goto found;
4732 }
4733
4734 return;
4735
4736 found:
4737 for (i = 0; i < num; ++i) {
4738 offset = desc[i].qstat_offset / sizeof(*ctx->data);
4739 v = (const __le64 *)(base + desc[i].offset);
4740 ctx->data[offset] = le64_to_cpu(*v);
4741 }
4742 }
4743
4744 /* virtnet_fill_stats - copy the stats to qstats or ethtool -S
4745 * The stats source is the device or the driver.
4746 *
4747 * @vi: virtio net info
4748 * @qid: the vq id
4749 * @ctx: stats ctx (initialized by virtnet_stats_ctx_init())
4750 * @base: pointer to the device reply or the driver stats structure.
4751 * @drv_stats: designate the base type (device reply, driver stats)
4752 * @reply_type: the type of the device reply (if drv_stats is true, this must be zero)
4753 */
4754 static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
4755 struct virtnet_stats_ctx *ctx,
4756 const u8 *base, bool drv_stats, u8 reply_type)
4757 {
4758 u32 queue_type, num_rx, num_tx, num_cq;
4759 const struct virtnet_stat_desc *desc;
4760 const u64_stats_t *v_stat;
4761 u64 offset, bitmap;
4762 const __le64 *v;
4763 int i, num;
4764
4765 if (ctx->to_qstat)
4766 return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type);
4767
4768 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
4769 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
4770 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
4771
4772 queue_type = vq_type(vi, qid);
4773 bitmap = ctx->bitmap[queue_type];
4774
4775 /* skip the total fields of pairs */
4776 offset = num_rx + num_tx;
4777
4778 if (queue_type == VIRTNET_Q_TYPE_TX) {
4779 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
4780
4781 num = ARRAY_SIZE(virtnet_sq_stats_desc);
4782 if (drv_stats) {
4783 desc = &virtnet_sq_stats_desc[0];
4784 goto drv_stats;
4785 }
4786
4787 offset += num;
4788
4789 } else if (queue_type == VIRTNET_Q_TYPE_RX) {
4790 offset += num_cq + num_rx * (qid / 2);
4791
4792 num = ARRAY_SIZE(virtnet_rq_stats_desc);
4793 if (drv_stats) {
4794 desc = &virtnet_rq_stats_desc[0];
4795 goto drv_stats;
4796 }
4797
4798 offset += num;
4799 }
4800
4801 if (bitmap & VIRTIO_NET_STATS_TYPE_CVQ) {
4802 desc = &virtnet_stats_cvq_desc[0];
4803 num = ARRAY_SIZE(virtnet_stats_cvq_desc);
4804 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_CVQ)
4805 goto found;
4806
4807 offset += num;
4808 }
4809
4810 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4811 desc = &virtnet_stats_rx_basic_desc[0];
4812 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4813 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
4814 goto found;
4815
4816 offset += num;
4817 }
4818
4819 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4820 desc = &virtnet_stats_rx_csum_desc[0];
4821 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4822 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
4823 goto found;
4824
4825 offset += num;
4826 }
4827
4828 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4829 desc = &virtnet_stats_rx_speed_desc[0];
4830 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4831 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
4832 goto found;
4833
4834 offset += num;
4835 }
4836
4837 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4838 desc = &virtnet_stats_tx_basic_desc[0];
4839 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4840 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
4841 goto found;
4842
4843 offset += num;
4844 }
4845
4846 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4847 desc = &virtnet_stats_tx_gso_desc[0];
4848 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4849 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
4850 goto found;
4851
4852 offset += num;
4853 }
4854
4855 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4856 desc = &virtnet_stats_tx_speed_desc[0];
4857 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4858 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
4859 goto found;
4860
4861 offset += num;
4862 }
4863
4864 return;
4865
4866 found:
4867 for (i = 0; i < num; ++i) {
4868 v = (const __le64 *)(base + desc[i].offset);
4869 ctx->data[offset + i] = le64_to_cpu(*v);
4870 }
4871
4872 return;
4873
4874 drv_stats:
4875 for (i = 0; i < num; ++i) {
4876 v_stat = (const u64_stats_t *)(base + desc[i].offset);
4877 ctx->data[offset + i] = u64_stats_read(v_stat);
4878 }
4879 }
4880
4881 static int __virtnet_get_hw_stats(struct virtnet_info *vi,
4882 struct virtnet_stats_ctx *ctx,
4883 struct virtio_net_ctrl_queue_stats *req,
4884 int req_size, void *reply, int res_size)
4885 {
4886 struct virtio_net_stats_reply_hdr *hdr;
4887 struct scatterlist sgs_in, sgs_out;
4888 void *p;
4889 u32 qid;
4890 int ok;
4891
4892 sg_init_one(&sgs_out, req, req_size);
4893 sg_init_one(&sgs_in, reply, res_size);
4894
4895 ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
4896 VIRTIO_NET_CTRL_STATS_GET,
4897 &sgs_out, &sgs_in);
4898
4899 if (!ok)
4900 return ok;
4901
4902 for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
4903 hdr = p;
4904 qid = le16_to_cpu(hdr->vq_index);
4905 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
4906 }
4907
4908 return 0;
4909 }
4910
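/* Append a stats request entry for @qid if its queue type has any device
 * statistics enabled in the ctx bitmap; *idx tracks the next free slot in @req.
 */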
4911 static void virtnet_make_stat_req(struct virtnet_info *vi,
4912 struct virtnet_stats_ctx *ctx,
4913 struct virtio_net_ctrl_queue_stats *req,
4914 int qid, int *idx)
4915 {
4916 int qtype = vq_type(vi, qid);
4917 u64 bitmap = ctx->bitmap[qtype];
4918
4919 if (!bitmap)
4920 return;
4921
4922 req->stats[*idx].vq_index = cpu_to_le16(qid);
4923 req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap);
4924 *idx += 1;
4925 }
4926
4927 /* qid: -1: get stats of all vqs.
4928 * >= 0: get the stats for the specified vq. This must not be the cvq.
4929 */
4930 static int virtnet_get_hw_stats(struct virtnet_info *vi,
4931 struct virtnet_stats_ctx *ctx, int qid)
4932 {
4933 int qnum, i, j, res_size, qtype, last_vq, first_vq;
4934 struct virtio_net_ctrl_queue_stats *req;
4935 bool enable_cvq;
4936 void *reply;
4937 int ok;
4938
4939 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
4940 return 0;
4941
4942 if (qid == -1) {
4943 last_vq = vi->curr_queue_pairs * 2 - 1;
4944 first_vq = 0;
4945 enable_cvq = true;
4946 } else {
4947 last_vq = qid;
4948 first_vq = qid;
4949 enable_cvq = false;
4950 }
4951
4952 qnum = 0;
4953 res_size = 0;
4954 for (i = first_vq; i <= last_vq ; ++i) {
4955 qtype = vq_type(vi, i);
4956 if (ctx->bitmap[qtype]) {
4957 ++qnum;
4958 res_size += ctx->size[qtype];
4959 }
4960 }
4961
4962 if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
4963 res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
4964 qnum += 1;
4965 }
4966
4967 req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
4968 if (!req)
4969 return -ENOMEM;
4970
4971 reply = kmalloc(res_size, GFP_KERNEL);
4972 if (!reply) {
4973 kfree(req);
4974 return -ENOMEM;
4975 }
4976
4977 j = 0;
4978 for (i = first_vq; i <= last_vq ; ++i)
4979 virtnet_make_stat_req(vi, ctx, req, i, &j);
4980
4981 if (enable_cvq)
4982 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
4983
4984 ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
4985
4986 kfree(req);
4987 kfree(reply);
4988
4989 return ok;
4990 }
4991
4992 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4993 {
4994 struct virtnet_info *vi = netdev_priv(dev);
4995 unsigned int i;
4996 u8 *p = data;
4997
4998 switch (stringset) {
4999 case ETH_SS_STATS:
5000 /* Generate the total field names. */
5001 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p);
5002 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p);
5003
5004 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
5005
5006 for (i = 0; i < vi->curr_queue_pairs; ++i)
5007 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
5008
5009 for (i = 0; i < vi->curr_queue_pairs; ++i)
5010 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
5011 break;
5012 }
5013 }
5014
5015 static int virtnet_get_sset_count(struct net_device *dev, int sset)
5016 {
5017 struct virtnet_info *vi = netdev_priv(dev);
5018 struct virtnet_stats_ctx ctx = {0};
5019 u32 pair_count;
5020
5021 switch (sset) {
5022 case ETH_SS_STATS:
5023 virtnet_stats_ctx_init(vi, &ctx, NULL, false);
5024
5025 pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
5026
5027 return pair_count + ctx.desc_num[VIRTNET_Q_TYPE_CQ] +
5028 vi->curr_queue_pairs * pair_count;
5029 default:
5030 return -EOPNOTSUPP;
5031 }
5032 }
5033
5034 static void virtnet_get_ethtool_stats(struct net_device *dev,
5035 struct ethtool_stats *stats, u64 *data)
5036 {
5037 struct virtnet_info *vi = netdev_priv(dev);
5038 struct virtnet_stats_ctx ctx = {0};
5039 unsigned int start, i;
5040 const u8 *stats_base;
5041
5042 virtnet_stats_ctx_init(vi, &ctx, data, false);
5043 if (virtnet_get_hw_stats(vi, &ctx, -1))
5044 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
5045
5046 for (i = 0; i < vi->curr_queue_pairs; i++) {
5047 struct receive_queue *rq = &vi->rq[i];
5048 struct send_queue *sq = &vi->sq[i];
5049
5050 stats_base = (const u8 *)&rq->stats;
5051 do {
5052 start = u64_stats_fetch_begin(&rq->stats.syncp);
5053 virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0);
5054 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
5055
5056 stats_base = (const u8 *)&sq->stats;
5057 do {
5058 start = u64_stats_fetch_begin(&sq->stats.syncp);
5059 virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
5060 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
5061 }
5062
5063 virtnet_fill_total_fields(vi, &ctx);
5064 }
5065
5066 static void virtnet_get_channels(struct net_device *dev,
5067 struct ethtool_channels *channels)
5068 {
5069 struct virtnet_info *vi = netdev_priv(dev);
5070
5071 channels->combined_count = vi->curr_queue_pairs;
5072 channels->max_combined = vi->max_queue_pairs;
5073 channels->max_other = 0;
5074 channels->rx_count = 0;
5075 channels->tx_count = 0;
5076 channels->other_count = 0;
5077 }
5078
5079 static int virtnet_set_link_ksettings(struct net_device *dev,
5080 const struct ethtool_link_ksettings *cmd)
5081 {
5082 struct virtnet_info *vi = netdev_priv(dev);
5083
5084 return ethtool_virtdev_set_link_ksettings(dev, cmd,
5085 &vi->speed, &vi->duplex);
5086 }
5087
5088 static int virtnet_get_link_ksettings(struct net_device *dev,
5089 struct ethtool_link_ksettings *cmd)
5090 {
5091 struct virtnet_info *vi = netdev_priv(dev);
5092
5093 cmd->base.speed = vi->speed;
5094 cmd->base.duplex = vi->duplex;
5095 cmd->base.port = PORT_OTHER;
5096
5097 return 0;
5098 }
5099
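/* Send the global TX coalescing parameters via the control virtqueue and,
 * on success, mirror them into every send queue's cached intr_coal values.
 */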
5100 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
5101 struct ethtool_coalesce *ec)
5102 {
5103 struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
5104 struct scatterlist sgs_tx;
5105 int i;
5106
5107 coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
5108 if (!coal_tx)
5109 return -ENOMEM;
5110
5111 coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
5112 coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
5113 sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
5114
5115 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
5116 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
5117 &sgs_tx))
5118 return -EINVAL;
5119
5120 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
5121 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
5122 for (i = 0; i < vi->max_queue_pairs; i++) {
5123 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
5124 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
5125 }
5126
5127 return 0;
5128 }
5129
5130 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
5131 struct ethtool_coalesce *ec)
5132 {
5133 struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
5134 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
5135 struct scatterlist sgs_rx;
5136 int i;
5137
5138 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
5139 return -EOPNOTSUPP;
5140
5141 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
5142 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
5143 return -EINVAL;
5144
5145 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
5146 vi->rx_dim_enabled = true;
5147 for (i = 0; i < vi->max_queue_pairs; i++) {
5148 mutex_lock(&vi->rq[i].dim_lock);
5149 vi->rq[i].dim_enabled = true;
5150 mutex_unlock(&vi->rq[i].dim_lock);
5151 }
5152 return 0;
5153 }
5154
5155 coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
5156 if (!coal_rx)
5157 return -ENOMEM;
5158
5159 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
5160 vi->rx_dim_enabled = false;
5161 for (i = 0; i < vi->max_queue_pairs; i++) {
5162 mutex_lock(&vi->rq[i].dim_lock);
5163 vi->rq[i].dim_enabled = false;
5164 mutex_unlock(&vi->rq[i].dim_lock);
5165 }
5166 }
5167
5168 /* Since the per-queue coalescing params can be set,
5169 * we need to apply the new global params even if they
5170 * are not updated.
5171 */
5172 coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
5173 coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
5174 sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
5175
5176 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
5177 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
5178 &sgs_rx))
5179 return -EINVAL;
5180
5181 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
5182 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
5183 for (i = 0; i < vi->max_queue_pairs; i++) {
5184 mutex_lock(&vi->rq[i].dim_lock);
5185 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
5186 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
5187 mutex_unlock(&vi->rq[i].dim_lock);
5188 }
5189
5190 return 0;
5191 }
5192
5193 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
5194 struct ethtool_coalesce *ec)
5195 {
5196 int err;
5197
5198 err = virtnet_send_tx_notf_coal_cmds(vi, ec);
5199 if (err)
5200 return err;
5201
5202 err = virtnet_send_rx_notf_coal_cmds(vi, ec);
5203 if (err)
5204 return err;
5205
5206 return 0;
5207 }
5208
5209 static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
5210 struct ethtool_coalesce *ec,
5211 u16 queue)
5212 {
5213 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
5214 u32 max_usecs, max_packets;
5215 bool cur_rx_dim;
5216 int err;
5217
5218 mutex_lock(&vi->rq[queue].dim_lock);
5219 cur_rx_dim = vi->rq[queue].dim_enabled;
5220 max_usecs = vi->rq[queue].intr_coal.max_usecs;
5221 max_packets = vi->rq[queue].intr_coal.max_packets;
5222
5223 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
5224 ec->rx_max_coalesced_frames != max_packets)) {
5225 mutex_unlock(&vi->rq[queue].dim_lock);
5226 return -EINVAL;
5227 }
5228
5229 if (rx_ctrl_dim_on && !cur_rx_dim) {
5230 vi->rq[queue].dim_enabled = true;
5231 mutex_unlock(&vi->rq[queue].dim_lock);
5232 return 0;
5233 }
5234
5235 if (!rx_ctrl_dim_on && cur_rx_dim)
5236 vi->rq[queue].dim_enabled = false;
5237
5238 /* If no params are updated, userspace ethtool will
5239 * reject the modification.
5240 */
5241 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
5242 ec->rx_coalesce_usecs,
5243 ec->rx_max_coalesced_frames);
5244 mutex_unlock(&vi->rq[queue].dim_lock);
5245 return err;
5246 }
5247
5248 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
5249 struct ethtool_coalesce *ec,
5250 u16 queue)
5251 {
5252 int err;
5253
5254 err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
5255 if (err)
5256 return err;
5257
5258 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
5259 ec->tx_coalesce_usecs,
5260 ec->tx_max_coalesced_frames);
5261 if (err)
5262 return err;
5263
5264 return 0;
5265 }
5266
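/* DIM worker: if dynamic interrupt moderation is enabled for this RX queue,
 * fetch the moderation profile suggested by net_dim and, when it differs
 * from the current settings, push it to the device with a per-VQ
 * coalescing command.
 */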
5267 static void virtnet_rx_dim_work(struct work_struct *work)
5268 {
5269 struct dim *dim = container_of(work, struct dim, work);
5270 struct receive_queue *rq = container_of(dim,
5271 struct receive_queue, dim);
5272 struct virtnet_info *vi = rq->vq->vdev->priv;
5273 struct net_device *dev = vi->dev;
5274 struct dim_cq_moder update_moder;
5275 int qnum, err;
5276
5277 qnum = rq - vi->rq;
5278
5279 mutex_lock(&rq->dim_lock);
5280 if (!rq->dim_enabled)
5281 goto out;
5282
5283 update_moder = net_dim_get_rx_irq_moder(dev, dim);
5284 if (update_moder.usec != rq->intr_coal.max_usecs ||
5285 update_moder.pkts != rq->intr_coal.max_packets) {
5286 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
5287 update_moder.usec,
5288 update_moder.pkts);
5289 if (err)
5290 pr_debug("%s: Failed to send dim parameters on rxq%d\n",
5291 dev->name, qnum);
5292 }
5293 out:
5294 dim->state = DIM_START_MEASURE;
5295 mutex_unlock(&rq->dim_lock);
5296 }
5297
5298 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
5299 {
5300 /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
5301 * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
5302 */
5303 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
5304 return -EOPNOTSUPP;
5305
5306 if (ec->tx_max_coalesced_frames > 1 ||
5307 ec->rx_max_coalesced_frames != 1)
5308 return -EINVAL;
5309
5310 return 0;
5311 }
5312
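/* The TX NAPI weight (i.e. whether tx napi is used) may only be changed
 * while the interface is down; otherwise report -EBUSY.
 */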
5313 static int virtnet_should_update_vq_weight(int dev_flags, int weight,
5314 int vq_weight, bool *should_update)
5315 {
5316 if (weight ^ vq_weight) {
5317 if (dev_flags & IFF_UP)
5318 return -EBUSY;
5319 *should_update = true;
5320 }
5321
5322 return 0;
5323 }
5324
5325 static int virtnet_set_coalesce(struct net_device *dev,
5326 struct ethtool_coalesce *ec,
5327 struct kernel_ethtool_coalesce *kernel_coal,
5328 struct netlink_ext_ack *extack)
5329 {
5330 struct virtnet_info *vi = netdev_priv(dev);
5331 int ret, queue_number, napi_weight, i;
5332 bool update_napi = false;
5333
5334 /* Can't change NAPI weight if the link is up */
5335 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
5336 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
5337 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
5338 vi->sq[queue_number].napi.weight,
5339 &update_napi);
5340 if (ret)
5341 return ret;
5342
5343 if (update_napi) {
5344 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be
5345 * updated for the sake of simplicity, which might not be necessary
5346 */
5347 break;
5348 }
5349 }
5350
5351 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
5352 ret = virtnet_send_notf_coal_cmds(vi, ec);
5353 else
5354 ret = virtnet_coal_params_supported(ec);
5355
5356 if (ret)
5357 return ret;
5358
5359 if (update_napi) {
5360 /* xsk xmit depends on the tx napi. So if xsk is active,
5361 * prevent modifications to tx napi.
5362 */
5363 for (i = queue_number; i < vi->max_queue_pairs; i++) {
5364 if (vi->sq[i].xsk_pool)
5365 return -EBUSY;
5366 }
5367
5368 for (; queue_number < vi->max_queue_pairs; queue_number++)
5369 vi->sq[queue_number].napi.weight = napi_weight;
5370 }
5371
5372 return ret;
5373 }
5374
5375 static int virtnet_get_coalesce(struct net_device *dev,
5376 struct ethtool_coalesce *ec,
5377 struct kernel_ethtool_coalesce *kernel_coal,
5378 struct netlink_ext_ack *extack)
5379 {
5380 struct virtnet_info *vi = netdev_priv(dev);
5381
5382 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
5383 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
5384 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
5385 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
5386 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
5387 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
5388 } else {
5389 ec->rx_max_coalesced_frames = 1;
5390
5391 if (vi->sq[0].napi.weight)
5392 ec->tx_max_coalesced_frames = 1;
5393 }
5394
5395 return 0;
5396 }
5397
5398 static int virtnet_set_per_queue_coalesce(struct net_device *dev,
5399 u32 queue,
5400 struct ethtool_coalesce *ec)
5401 {
5402 struct virtnet_info *vi = netdev_priv(dev);
5403 int ret, napi_weight;
5404 bool update_napi = false;
5405
5406 if (queue >= vi->max_queue_pairs)
5407 return -EINVAL;
5408
5409 /* Can't change NAPI weight if the link is up */
5410 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
5411 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
5412 vi->sq[queue].napi.weight,
5413 &update_napi);
5414 if (ret)
5415 return ret;
5416
5417 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
5418 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
5419 else
5420 ret = virtnet_coal_params_supported(ec);
5421
5422 if (ret)
5423 return ret;
5424
5425 if (update_napi)
5426 vi->sq[queue].napi.weight = napi_weight;
5427
5428 return 0;
5429 }
5430
5431 static int virtnet_get_per_queue_coalesce(struct net_device *dev,
5432 u32 queue,
5433 struct ethtool_coalesce *ec)
5434 {
5435 struct virtnet_info *vi = netdev_priv(dev);
5436
5437 if (queue >= vi->max_queue_pairs)
5438 return -EINVAL;
5439
5440 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
5441 mutex_lock(&vi->rq[queue].dim_lock);
5442 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
5443 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
5444 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
5445 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
5446 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
5447 mutex_unlock(&vi->rq[queue].dim_lock);
5448 } else {
5449 ec->rx_max_coalesced_frames = 1;
5450
5451 if (vi->sq[queue].napi.weight)
5452 ec->tx_max_coalesced_frames = 1;
5453 }
5454
5455 return 0;
5456 }
5457
5458 static void virtnet_init_settings(struct net_device *dev)
5459 {
5460 struct virtnet_info *vi = netdev_priv(dev);
5461
5462 vi->speed = SPEED_UNKNOWN;
5463 vi->duplex = DUPLEX_UNKNOWN;
5464 }
5465
5466 static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
5467 {
5468 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
5469 }
5470
5471 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
5472 {
5473 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
5474 }
5475
5476 static int virtnet_get_rxfh(struct net_device *dev,
5477 struct ethtool_rxfh_param *rxfh)
5478 {
5479 struct virtnet_info *vi = netdev_priv(dev);
5480 int i;
5481
5482 if (rxfh->indir) {
5483 for (i = 0; i < vi->rss_indir_table_size; ++i)
5484 rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]);
5485 }
5486
5487 if (rxfh->key)
5488 memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size);
5489
5490 rxfh->hfunc = ETH_RSS_HASH_TOP;
5491
5492 return 0;
5493 }
5494
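/* virtnet_set_rxfh() updates the RSS indirection table and/or hash key and
 * commits them to the device. Illustrative userspace usage (standard ethtool
 * syntax, "eth0" is a placeholder device name):
 *
 *   ethtool -X eth0 equal 4            # spread flows evenly over 4 queues
 *   ethtool -X eth0 hkey <key bytes>   # set the hash key
 */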
5495 static int virtnet_set_rxfh(struct net_device *dev,
5496 struct ethtool_rxfh_param *rxfh,
5497 struct netlink_ext_ack *extack)
5498 {
5499 struct virtnet_info *vi = netdev_priv(dev);
5500 bool update = false;
5501 int i;
5502
5503 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
5504 rxfh->hfunc != ETH_RSS_HASH_TOP)
5505 return -EOPNOTSUPP;
5506
5507 if (rxfh->indir) {
5508 if (!vi->has_rss)
5509 return -EOPNOTSUPP;
5510
5511 for (i = 0; i < vi->rss_indir_table_size; ++i)
5512 vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]);
5513 update = true;
5514 }
5515
5516 if (rxfh->key) {
5517 /* If either _F_HASH_REPORT or _F_RSS is negotiated, the
5518 * device provides hash calculation capabilities, that is,
5519 * hash_key is configured.
5520 */
5521 if (!vi->has_rss && !vi->has_rss_hash_report)
5522 return -EOPNOTSUPP;
5523
5524 memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size);
5525 update = true;
5526 }
5527
5528 if (update)
5529 virtnet_commit_rss_command(vi);
5530
5531 return 0;
5532 }
5533
5534 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
5535 {
5536 struct virtnet_info *vi = netdev_priv(dev);
5537 int rc = 0;
5538
5539 switch (info->cmd) {
5540 case ETHTOOL_GRXRINGS:
5541 info->data = vi->curr_queue_pairs;
5542 break;
5543 case ETHTOOL_GRXFH:
5544 virtnet_get_hashflow(vi, info);
5545 break;
5546 default:
5547 rc = -EOPNOTSUPP;
5548 }
5549
5550 return rc;
5551 }
5552
5553 static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
5554 {
5555 struct virtnet_info *vi = netdev_priv(dev);
5556 int rc = 0;
5557
5558 switch (info->cmd) {
5559 case ETHTOOL_SRXFH:
5560 if (!virtnet_set_hashflow(vi, info))
5561 rc = -EINVAL;
5562
5563 break;
5564 default:
5565 rc = -EOPNOTSUPP;
5566 }
5567
5568 return rc;
5569 }
5570
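/* Illustrative userspace examples (standard ethtool syntax, "eth0" is a
 * placeholder) that exercise the coalescing callbacks above, assuming the
 * device negotiated the matching VIRTIO_NET_F_*NOTF_COAL features:
 *
 *   ethtool -C eth0 rx-usecs 8 rx-frames 64                 # device-wide
 *   ethtool -C eth0 adaptive-rx on                          # enable RX DIM
 *   ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 8
 */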
5571 static const struct ethtool_ops virtnet_ethtool_ops = {
5572 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
5573 ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
5574 .get_drvinfo = virtnet_get_drvinfo,
5575 .get_link = ethtool_op_get_link,
5576 .get_ringparam = virtnet_get_ringparam,
5577 .set_ringparam = virtnet_set_ringparam,
5578 .get_strings = virtnet_get_strings,
5579 .get_sset_count = virtnet_get_sset_count,
5580 .get_ethtool_stats = virtnet_get_ethtool_stats,
5581 .set_channels = virtnet_set_channels,
5582 .get_channels = virtnet_get_channels,
5583 .get_ts_info = ethtool_op_get_ts_info,
5584 .get_link_ksettings = virtnet_get_link_ksettings,
5585 .set_link_ksettings = virtnet_set_link_ksettings,
5586 .set_coalesce = virtnet_set_coalesce,
5587 .get_coalesce = virtnet_get_coalesce,
5588 .set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
5589 .get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
5590 .get_rxfh_key_size = virtnet_get_rxfh_key_size,
5591 .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
5592 .get_rxfh = virtnet_get_rxfh,
5593 .set_rxfh = virtnet_set_rxfh,
5594 .get_rxnfc = virtnet_get_rxnfc,
5595 .set_rxnfc = virtnet_set_rxnfc,
5596 };
5597
5598 static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
5599 struct netdev_queue_stats_rx *stats)
5600 {
5601 struct virtnet_info *vi = netdev_priv(dev);
5602 struct receive_queue *rq = &vi->rq[i];
5603 struct virtnet_stats_ctx ctx = {0};
5604
5605 virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
5606
5607 virtnet_get_hw_stats(vi, &ctx, i * 2);
5608 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0);
5609 }
5610
5611 static void virtnet_get_queue_stats_tx(struct net_device *dev, int i,
5612 struct netdev_queue_stats_tx *stats)
5613 {
5614 struct virtnet_info *vi = netdev_priv(dev);
5615 struct send_queue *sq = &vi->sq[i];
5616 struct virtnet_stats_ctx ctx = {0};
5617
5618 virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
5619
5620 virtnet_get_hw_stats(vi, &ctx, i * 2 + 1);
5621 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0);
5622 }
5623
5624 static void virtnet_get_base_stats(struct net_device *dev,
5625 struct netdev_queue_stats_rx *rx,
5626 struct netdev_queue_stats_tx *tx)
5627 {
5628 struct virtnet_info *vi = netdev_priv(dev);
5629
5630 /* The queue stats of virtio-net are never reset, so the base stats
5631 * reported here are all 0.
5632 */
5633 rx->bytes = 0;
5634 rx->packets = 0;
5635
5636 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
5637 rx->hw_drops = 0;
5638 rx->hw_drop_overruns = 0;
5639 }
5640
5641 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
5642 rx->csum_unnecessary = 0;
5643 rx->csum_none = 0;
5644 rx->csum_bad = 0;
5645 }
5646
5647 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
5648 rx->hw_gro_packets = 0;
5649 rx->hw_gro_bytes = 0;
5650 rx->hw_gro_wire_packets = 0;
5651 rx->hw_gro_wire_bytes = 0;
5652 }
5653
5654 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED)
5655 rx->hw_drop_ratelimits = 0;
5656
5657 tx->bytes = 0;
5658 tx->packets = 0;
5659 tx->stop = 0;
5660 tx->wake = 0;
5661
5662 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
5663 tx->hw_drops = 0;
5664 tx->hw_drop_errors = 0;
5665 }
5666
5667 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
5668 tx->csum_none = 0;
5669 tx->needs_csum = 0;
5670 }
5671
5672 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
5673 tx->hw_gso_packets = 0;
5674 tx->hw_gso_bytes = 0;
5675 tx->hw_gso_wire_packets = 0;
5676 tx->hw_gso_wire_bytes = 0;
5677 }
5678
5679 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
5680 tx->hw_drop_ratelimits = 0;
5681
5682 netdev_stat_queue_sum(dev,
5683 dev->real_num_rx_queues, vi->max_queue_pairs, rx,
5684 dev->real_num_tx_queues, vi->max_queue_pairs, tx);
5685 }
5686
5687 static const struct netdev_stat_ops virtnet_stat_ops = {
5688 .get_queue_stats_rx = virtnet_get_queue_stats_rx,
5689 .get_queue_stats_tx = virtnet_get_queue_stats_tx,
5690 .get_base_stats = virtnet_get_base_stats,
5691 };
5692
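/* Quiesce the device for suspend/freeze: stop the config and rx-mode workers,
 * detach the netdev so the stack stops transmitting, and close the queues if
 * the interface was running.
 */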
5693 static void virtnet_freeze_down(struct virtio_device *vdev)
5694 {
5695 struct virtnet_info *vi = vdev->priv;
5696
5697 /* Make sure no work handler is accessing the device */
5698 flush_work(&vi->config_work);
5699 disable_rx_mode_work(vi);
5700 flush_work(&vi->rx_mode_work);
5701
5702 netif_tx_lock_bh(vi->dev);
5703 netif_device_detach(vi->dev);
5704 netif_tx_unlock_bh(vi->dev);
5705 if (netif_running(vi->dev)) {
5706 rtnl_lock();
5707 virtnet_close(vi->dev);
5708 rtnl_unlock();
5709 }
5710 }
5711
5712 static int init_vqs(struct virtnet_info *vi);
5713
5714 static int virtnet_restore_up(struct virtio_device *vdev)
5715 {
5716 struct virtnet_info *vi = vdev->priv;
5717 int err;
5718
5719 err = init_vqs(vi);
5720 if (err)
5721 return err;
5722
5723 virtio_device_ready(vdev);
5724
5725 enable_delayed_refill(vi);
5726 enable_rx_mode_work(vi);
5727
5728 if (netif_running(vi->dev)) {
5729 rtnl_lock();
5730 err = virtnet_open(vi->dev);
5731 rtnl_unlock();
5732 if (err)
5733 return err;
5734 }
5735
5736 netif_tx_lock_bh(vi->dev);
5737 netif_device_attach(vi->dev);
5738 netif_tx_unlock_bh(vi->dev);
5739 return err;
5740 }
5741
5742 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
5743 {
5744 __virtio64 *_offloads __free(kfree) = NULL;
5745 struct scatterlist sg;
5746
5747 _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
5748 if (!_offloads)
5749 return -ENOMEM;
5750
5751 *_offloads = cpu_to_virtio64(vi->vdev, offloads);
5752
5753 sg_init_one(&sg, _offloads, sizeof(*_offloads));
5754
5755 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
5756 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
5757 dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
5758 return -EINVAL;
5759 }
5760
5761 return 0;
5762 }
5763
5764 static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
5765 {
5766 u64 offloads = 0;
5767
5768 if (!vi->guest_offloads)
5769 return 0;
5770
5771 return virtnet_set_guest_offloads(vi, offloads);
5772 }
5773
5774 static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
5775 {
5776 u64 offloads = vi->guest_offloads;
5777
5778 if (!vi->guest_offloads)
5779 return 0;
5780
5781 return virtnet_set_guest_offloads(vi, offloads);
5782 }
5783
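/* (Un)bind an AF_XDP buffer pool to an RX queue: register the xsk rxq info
 * when a pool is given, pause RX, reset the virtqueue so stale buffers are
 * freed, then resume with rq->xsk_pool pointing at the new pool (or NULL).
 */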
5784 static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
5785 struct xsk_buff_pool *pool)
5786 {
5787 int err, qindex;
5788
5789 qindex = rq - vi->rq;
5790
5791 if (pool) {
5792 err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
5793 if (err < 0)
5794 return err;
5795
5796 err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
5797 MEM_TYPE_XSK_BUFF_POOL, NULL);
5798 if (err < 0)
5799 goto unreg;
5800
5801 xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
5802 }
5803
5804 virtnet_rx_pause(vi, rq);
5805
5806 err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL);
5807 if (err) {
5808 netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
5809
5810 pool = NULL;
5811 }
5812
5813 rq->xsk_pool = pool;
5814
5815 virtnet_rx_resume(vi, rq);
5816
5817 if (pool)
5818 return 0;
5819
5820 unreg:
5821 xdp_rxq_info_unreg(&rq->xsk_rxq_info);
5822 return err;
5823 }
5824
5825 static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
5826 struct send_queue *sq,
5827 struct xsk_buff_pool *pool)
5828 {
5829 int err, qindex;
5830
5831 qindex = sq - vi->sq;
5832
5833 virtnet_tx_pause(vi, sq);
5834
5835 err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf,
5836 virtnet_sq_free_unused_buf_done);
5837 if (err) {
5838 netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
5839 pool = NULL;
5840 }
5841
5842 sq->xsk_pool = pool;
5843
5844 virtnet_tx_resume(vi, sq);
5845
5846 return err;
5847 }
5848
5849 static int virtnet_xsk_pool_enable(struct net_device *dev,
5850 struct xsk_buff_pool *pool,
5851 u16 qid)
5852 {
5853 struct virtnet_info *vi = netdev_priv(dev);
5854 struct receive_queue *rq;
5855 struct device *dma_dev;
5856 struct send_queue *sq;
5857 dma_addr_t hdr_dma;
5858 int err, size;
5859
5860 if (vi->hdr_len > xsk_pool_get_headroom(pool))
5861 return -EINVAL;
5862
5863 /* XDP cannot work in big_packets mode, so there is no need to
5864 * initialize the rq's xsk state.
5865 */
5866 if (vi->big_packets && !vi->mergeable_rx_bufs)
5867 return -ENOENT;
5868
5869 if (qid >= vi->curr_queue_pairs)
5870 return -EINVAL;
5871
5872 sq = &vi->sq[qid];
5873 rq = &vi->rq[qid];
5874
5875 /* xsk assumes that tx and rx share the same dma device: af-xdp may
5876 * receive into a buffer from the rx queue and reuse that same buffer
5877 * for transmission, so the dma dev of sq and rq must be the same one.
5878 *
5879 * But vq->dma_dev allows every vq to have its own dma dev, so check
5880 * that the dma devs of rq and sq really are the same device.
5881 */
5882 if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
5883 return -EINVAL;
5884
5885 dma_dev = virtqueue_dma_dev(rq->vq);
5886 if (!dma_dev)
5887 return -EINVAL;
5888
5889 size = virtqueue_get_vring_size(rq->vq);
5890
5891 rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL);
5892 if (!rq->xsk_buffs)
5893 return -ENOMEM;
5894
5895 hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
5896 DMA_TO_DEVICE, 0);
5897 if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) {
5898 err = -ENOMEM;
5899 goto err_free_buffs;
5900 }
5901
5902 err = xsk_pool_dma_map(pool, dma_dev, 0);
5903 if (err)
5904 goto err_xsk_map;
5905
5906 err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
5907 if (err)
5908 goto err_rq;
5909
5910 err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
5911 if (err)
5912 goto err_sq;
5913
5914 /* We do not support tx offloads (such as tx csum) yet, so the tx
5915 * virtnet hdr is all zero and a single hdr can be shared by all tx packets.
5916 */
5917 sq->xsk_hdr_dma_addr = hdr_dma;
5918
5919 return 0;
5920
5921 err_sq:
5922 virtnet_rq_bind_xsk_pool(vi, rq, NULL);
5923 err_rq:
5924 xsk_pool_dma_unmap(pool, 0);
5925 err_xsk_map:
5926 virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
5927 DMA_TO_DEVICE, 0);
5928 err_free_buffs:
5929 kvfree(rq->xsk_buffs);
5930 return err;
5931 }
5932
5933 static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
5934 {
5935 struct virtnet_info *vi = netdev_priv(dev);
5936 struct xsk_buff_pool *pool;
5937 struct receive_queue *rq;
5938 struct send_queue *sq;
5939 int err;
5940
5941 if (qid >= vi->curr_queue_pairs)
5942 return -EINVAL;
5943
5944 sq = &vi->sq[qid];
5945 rq = &vi->rq[qid];
5946
5947 pool = rq->xsk_pool;
5948
5949 err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
5950 err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
5951
5952 xsk_pool_dma_unmap(pool, 0);
5953
5954 virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
5955 vi->hdr_len, DMA_TO_DEVICE, 0);
5956 kvfree(rq->xsk_buffs);
5957
5958 return err;
5959 }
5960
5961 static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
5962 {
5963 if (xdp->xsk.pool)
5964 return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
5965 xdp->xsk.queue_id);
5966 else
5967 return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
5968 }
5969
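/* Attach/detach the XDP program for all RX queues. Illustrative userspace
 * usage (standard iproute2 syntax, "eth0" and "prog.o" are placeholders):
 *
 *   ip link set dev eth0 xdp obj prog.o sec xdp     # attach
 *   ip link set dev eth0 xdp off                    # detach
 */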
5970 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
5971 struct netlink_ext_ack *extack)
5972 {
5973 unsigned int room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
5974 sizeof(struct skb_shared_info));
5975 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
5976 struct virtnet_info *vi = netdev_priv(dev);
5977 struct bpf_prog *old_prog;
5978 u16 xdp_qp = 0, curr_qp;
5979 int i, err;
5980
5981 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
5982 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
5983 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
5984 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
5985 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
5986 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
5987 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
5988 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
5989 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
5990 return -EOPNOTSUPP;
5991 }
5992
5993 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
5994 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
5995 return -EINVAL;
5996 }
5997
5998 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
5999 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
6000 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
6001 return -EINVAL;
6002 }
6003
6004 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
6005 if (prog)
6006 xdp_qp = nr_cpu_ids;
6007
6008 /* XDP requires extra queues for XDP_TX */
6009 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
6010 netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
6011 curr_qp + xdp_qp, vi->max_queue_pairs);
6012 xdp_qp = 0;
6013 }
6014
6015 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
6016 if (!prog && !old_prog)
6017 return 0;
6018
6019 if (prog)
6020 bpf_prog_add(prog, vi->max_queue_pairs - 1);
6021
6022 virtnet_rx_pause_all(vi);
6023
6024 /* Make sure NAPI is not using any XDP TX queues for RX. */
6025 if (netif_running(dev)) {
6026 for (i = 0; i < vi->max_queue_pairs; i++)
6027 virtnet_napi_tx_disable(&vi->sq[i]);
6028 }
6029
6030 if (!prog) {
6031 for (i = 0; i < vi->max_queue_pairs; i++) {
6032 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
6033 if (i == 0)
6034 virtnet_restore_guest_offloads(vi);
6035 }
6036 synchronize_net();
6037 }
6038
6039 err = virtnet_set_queues(vi, curr_qp + xdp_qp);
6040 if (err)
6041 goto err;
6042 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
6043 vi->xdp_queue_pairs = xdp_qp;
6044
6045 if (prog) {
6046 vi->xdp_enabled = true;
6047 for (i = 0; i < vi->max_queue_pairs; i++) {
6048 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
6049 if (i == 0 && !old_prog)
6050 virtnet_clear_guest_offloads(vi);
6051 }
6052 if (!old_prog)
6053 xdp_features_set_redirect_target(dev, true);
6054 } else {
6055 xdp_features_clear_redirect_target(dev);
6056 vi->xdp_enabled = false;
6057 }
6058
6059 virtnet_rx_resume_all(vi);
6060 for (i = 0; i < vi->max_queue_pairs; i++) {
6061 if (old_prog)
6062 bpf_prog_put(old_prog);
6063 if (netif_running(dev))
6064 virtnet_napi_tx_enable(&vi->sq[i]);
6065 }
6066
6067 return 0;
6068
6069 err:
6070 if (!prog) {
6071 virtnet_clear_guest_offloads(vi);
6072 for (i = 0; i < vi->max_queue_pairs; i++)
6073 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
6074 }
6075
6076 virtnet_rx_resume_all(vi);
6077 if (netif_running(dev)) {
6078 for (i = 0; i < vi->max_queue_pairs; i++)
6079 virtnet_napi_tx_enable(&vi->sq[i]);
6080 }
6081 if (prog)
6082 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
6083 return err;
6084 }
6085
6086 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
6087 {
6088 switch (xdp->command) {
6089 case XDP_SETUP_PROG:
6090 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
6091 case XDP_SETUP_XSK_POOL:
6092 return virtnet_xsk_pool_setup(dev, xdp);
6093 default:
6094 return -EINVAL;
6095 }
6096 }
6097
6098 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
6099 size_t len)
6100 {
6101 struct virtnet_info *vi = netdev_priv(dev);
6102 int ret;
6103
6104 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
6105 return -EOPNOTSUPP;
6106
6107 ret = snprintf(buf, len, "sby");
6108 if (ret >= len)
6109 return -EOPNOTSUPP;
6110
6111 return 0;
6112 }
6113
6114 static int virtnet_set_features(struct net_device *dev,
6115 netdev_features_t features)
6116 {
6117 struct virtnet_info *vi = netdev_priv(dev);
6118 u64 offloads;
6119 int err;
6120
6121 if ((dev->features ^ features) & NETIF_F_GRO_HW) {
6122 if (vi->xdp_enabled)
6123 return -EBUSY;
6124
6125 if (features & NETIF_F_GRO_HW)
6126 offloads = vi->guest_offloads_capable;
6127 else
6128 offloads = vi->guest_offloads_capable &
6129 ~GUEST_OFFLOAD_GRO_HW_MASK;
6130
6131 err = virtnet_set_guest_offloads(vi, offloads);
6132 if (err)
6133 return err;
6134 vi->guest_offloads = offloads;
6135 }
6136
6137 if ((dev->features ^ features) & NETIF_F_RXHASH) {
6138 if (features & NETIF_F_RXHASH)
6139 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
6140 else
6141 vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE);
6142
6143 if (!virtnet_commit_rss_command(vi))
6144 return -EINVAL;
6145 }
6146
6147 return 0;
6148 }
6149
6150 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
6151 {
6152 struct virtnet_info *priv = netdev_priv(dev);
6153 struct send_queue *sq = &priv->sq[txqueue];
6154 struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
6155
6156 u64_stats_update_begin(&sq->stats.syncp);
6157 u64_stats_inc(&sq->stats.tx_timeouts);
6158 u64_stats_update_end(&sq->stats.syncp);
6159
6160 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
6161 txqueue, sq->name, sq->vq->index, sq->vq->name,
6162 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
6163 }
6164
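/* Set up adaptive RX interrupt moderation (DIM): register an irq moderation
 * profile keyed on both usecs and packets, with virtnet_rx_dim_work() applying
 * the parameters chosen by the DIM algorithm for each RX queue.
 */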
6165 static int virtnet_init_irq_moder(struct virtnet_info *vi)
6166 {
6167 u8 profile_flags = 0, coal_flags = 0;
6168 int ret, i;
6169
6170 profile_flags |= DIM_PROFILE_RX;
6171 coal_flags |= DIM_COALESCE_USEC | DIM_COALESCE_PKTS;
6172 ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags,
6173 DIM_CQ_PERIOD_MODE_START_FROM_EQE,
6174 0, virtnet_rx_dim_work, NULL);
6175
6176 if (ret)
6177 return ret;
6178
6179 for (i = 0; i < vi->max_queue_pairs; i++)
6180 net_dim_setting(vi->dev, &vi->rq[i].dim, false);
6181
6182 return 0;
6183 }
6184
6185 static void virtnet_free_irq_moder(struct virtnet_info *vi)
6186 {
6187 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
6188 return;
6189
6190 rtnl_lock();
6191 net_dim_free_irq_moder(vi->dev);
6192 rtnl_unlock();
6193 }
6194
6195 static const struct net_device_ops virtnet_netdev = {
6196 .ndo_open = virtnet_open,
6197 .ndo_stop = virtnet_close,
6198 .ndo_start_xmit = start_xmit,
6199 .ndo_validate_addr = eth_validate_addr,
6200 .ndo_set_mac_address = virtnet_set_mac_address,
6201 .ndo_set_rx_mode = virtnet_set_rx_mode,
6202 .ndo_get_stats64 = virtnet_stats,
6203 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
6204 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
6205 .ndo_bpf = virtnet_xdp,
6206 .ndo_xdp_xmit = virtnet_xdp_xmit,
6207 .ndo_xsk_wakeup = virtnet_xsk_wakeup,
6208 .ndo_features_check = passthru_features_check,
6209 .ndo_get_phys_port_name = virtnet_get_phys_port_name,
6210 .ndo_set_features = virtnet_set_features,
6211 .ndo_tx_timeout = virtnet_tx_timeout,
6212 };
6213
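/* Deferred handler for config-space interrupts: ack link announcements and
 * propagate link up/down changes to the netdev carrier and TX queues.
 */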
6214 static void virtnet_config_changed_work(struct work_struct *work)
6215 {
6216 struct virtnet_info *vi =
6217 container_of(work, struct virtnet_info, config_work);
6218 u16 v;
6219
6220 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
6221 struct virtio_net_config, status, &v) < 0)
6222 return;
6223
6224 if (v & VIRTIO_NET_S_ANNOUNCE) {
6225 netdev_notify_peers(vi->dev);
6226 virtnet_ack_link_announce(vi);
6227 }
6228
6229 /* Ignore unknown (future) status bits */
6230 v &= VIRTIO_NET_S_LINK_UP;
6231
6232 if (vi->status == v)
6233 return;
6234
6235 vi->status = v;
6236
6237 if (vi->status & VIRTIO_NET_S_LINK_UP) {
6238 virtnet_update_settings(vi);
6239 netif_carrier_on(vi->dev);
6240 netif_tx_wake_all_queues(vi->dev);
6241 } else {
6242 netif_carrier_off(vi->dev);
6243 netif_tx_stop_all_queues(vi->dev);
6244 }
6245 }
6246
6247 static void virtnet_config_changed(struct virtio_device *vdev)
6248 {
6249 struct virtnet_info *vi = vdev->priv;
6250
6251 schedule_work(&vi->config_work);
6252 }
6253
6254 static void virtnet_free_queues(struct virtnet_info *vi)
6255 {
6256 int i;
6257
6258 for (i = 0; i < vi->max_queue_pairs; i++) {
6259 __netif_napi_del(&vi->rq[i].napi);
6260 __netif_napi_del(&vi->sq[i].napi);
6261 }
6262
6263 /* We called __netif_napi_del(),
6264 * we need to respect an RCU grace period before freeing vi->rq
6265 */
6266 synchronize_net();
6267
6268 kfree(vi->rq);
6269 kfree(vi->sq);
6270 kfree(vi->ctrl);
6271 }
6272
6273 static void _free_receive_bufs(struct virtnet_info *vi)
6274 {
6275 struct bpf_prog *old_prog;
6276 int i;
6277
6278 for (i = 0; i < vi->max_queue_pairs; i++) {
6279 while (vi->rq[i].pages)
6280 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
6281
6282 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
6283 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
6284 if (old_prog)
6285 bpf_prog_put(old_prog);
6286 }
6287 }
6288
6289 static void free_receive_bufs(struct virtnet_info *vi)
6290 {
6291 rtnl_lock();
6292 _free_receive_bufs(vi);
6293 rtnl_unlock();
6294 }
6295
6296 static void free_receive_page_frags(struct virtnet_info *vi)
6297 {
6298 int i;
6299 for (i = 0; i < vi->max_queue_pairs; i++)
6300 if (vi->rq[i].alloc_frag.page) {
6301 if (vi->rq[i].last_dma)
6302 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
6303 put_page(vi->rq[i].alloc_frag.page);
6304 }
6305 }
6306
6307 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
6308 {
6309 struct virtnet_info *vi = vq->vdev->priv;
6310 struct send_queue *sq;
6311 int i = vq2txq(vq);
6312
6313 sq = &vi->sq[i];
6314
6315 switch (virtnet_xmit_ptr_unpack(&buf)) {
6316 case VIRTNET_XMIT_TYPE_SKB:
6317 case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
6318 dev_kfree_skb(buf);
6319 break;
6320
6321 case VIRTNET_XMIT_TYPE_XDP:
6322 xdp_return_frame(buf);
6323 break;
6324
6325 case VIRTNET_XMIT_TYPE_XSK:
6326 xsk_tx_completed(sq->xsk_pool, 1);
6327 break;
6328 }
6329 }
6330
6331 static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq)
6332 {
6333 struct virtnet_info *vi = vq->vdev->priv;
6334 int i = vq2txq(vq);
6335
6336 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
6337 }
6338
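/* Drain any buffers still queued on the TX and RX virtqueues (e.g. after the
 * device has been reset), freeing skbs/xdp frames and unmapping RX buffers.
 */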
6339 static void free_unused_bufs(struct virtnet_info *vi)
6340 {
6341 void *buf;
6342 int i;
6343
6344 for (i = 0; i < vi->max_queue_pairs; i++) {
6345 struct virtqueue *vq = vi->sq[i].vq;
6346 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
6347 virtnet_sq_free_unused_buf(vq, buf);
6348 cond_resched();
6349 }
6350
6351 for (i = 0; i < vi->max_queue_pairs; i++) {
6352 struct virtqueue *vq = vi->rq[i].vq;
6353
6354 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
6355 virtnet_rq_unmap_free_buf(vq, buf);
6356 cond_resched();
6357 }
6358 }
6359
6360 static void virtnet_del_vqs(struct virtnet_info *vi)
6361 {
6362 struct virtio_device *vdev = vi->vdev;
6363
6364 virtnet_clean_affinity(vi);
6365
6366 vdev->config->del_vqs(vdev);
6367
6368 virtnet_free_queues(vi);
6369 }
6370
6371 /* How large should a single buffer be so a queue full of these can fit at
6372 * least one full packet?
6373 * Logic below assumes the mergeable buffer header is used.
6374 */
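/* Illustrative arithmetic: with a 256-entry ring and 64KiB maximum packets,
 * each buffer must cover roughly 64KiB / 256 = 256 bytes; the result is then
 * clamped up to GOOD_PACKET_LEN.
 */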
6375 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
6376 {
6377 const unsigned int hdr_len = vi->hdr_len;
6378 unsigned int rq_size = virtqueue_get_vring_size(vq);
6379 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
6380 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
6381 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
6382
6383 return max(max(min_buf_len, hdr_len) - hdr_len,
6384 (unsigned int)GOOD_PACKET_LEN);
6385 }
6386
6387 static int virtnet_find_vqs(struct virtnet_info *vi)
6388 {
6389 struct virtqueue_info *vqs_info;
6390 struct virtqueue **vqs;
6391 int ret = -ENOMEM;
6392 int total_vqs;
6393 bool *ctx;
6394 u16 i;
6395
6396 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
6397 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
6398 * possible control vq.
6399 */
6400 total_vqs = vi->max_queue_pairs * 2 +
6401 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
6402
6403 /* Allocate space for find_vqs parameters */
6404 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
6405 if (!vqs)
6406 goto err_vq;
6407 vqs_info = kcalloc(total_vqs, sizeof(*vqs_info), GFP_KERNEL);
6408 if (!vqs_info)
6409 goto err_vqs_info;
6410 if (!vi->big_packets || vi->mergeable_rx_bufs) {
6411 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
6412 if (!ctx)
6413 goto err_ctx;
6414 } else {
6415 ctx = NULL;
6416 }
6417
6418 /* Parameters for control virtqueue, if any */
6419 if (vi->has_cvq) {
6420 vqs_info[total_vqs - 1].name = "control";
6421 }
6422
6423 /* Allocate/initialize parameters for send/receive virtqueues */
6424 for (i = 0; i < vi->max_queue_pairs; i++) {
6425 vqs_info[rxq2vq(i)].callback = skb_recv_done;
6426 vqs_info[txq2vq(i)].callback = skb_xmit_done;
6427 sprintf(vi->rq[i].name, "input.%u", i);
6428 sprintf(vi->sq[i].name, "output.%u", i);
6429 vqs_info[rxq2vq(i)].name = vi->rq[i].name;
6430 vqs_info[txq2vq(i)].name = vi->sq[i].name;
6431 if (ctx)
6432 vqs_info[rxq2vq(i)].ctx = true;
6433 }
6434
6435 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL);
6436 if (ret)
6437 goto err_find;
6438
6439 if (vi->has_cvq) {
6440 vi->cvq = vqs[total_vqs - 1];
6441 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
6442 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6443 }
6444
6445 for (i = 0; i < vi->max_queue_pairs; i++) {
6446 vi->rq[i].vq = vqs[rxq2vq(i)];
6447 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
6448 vi->sq[i].vq = vqs[txq2vq(i)];
6449 }
6450
6451 /* ret == 0 when we reach here; fall through to free the temporary arrays. */
6452
6453
6454 err_find:
6455 kfree(ctx);
6456 err_ctx:
6457 kfree(vqs_info);
6458 err_vqs_info:
6459 kfree(vqs);
6460 err_vq:
6461 return ret;
6462 }
6463
6464 static int virtnet_alloc_queues(struct virtnet_info *vi)
6465 {
6466 int i;
6467
6468 if (vi->has_cvq) {
6469 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
6470 if (!vi->ctrl)
6471 goto err_ctrl;
6472 } else {
6473 vi->ctrl = NULL;
6474 }
6475 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
6476 if (!vi->sq)
6477 goto err_sq;
6478 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
6479 if (!vi->rq)
6480 goto err_rq;
6481
6482 INIT_DELAYED_WORK(&vi->refill, refill_work);
6483 for (i = 0; i < vi->max_queue_pairs; i++) {
6484 vi->rq[i].pages = NULL;
6485 netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
6486 i);
6487 vi->rq[i].napi.weight = napi_weight;
6488 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
6489 virtnet_poll_tx,
6490 napi_tx ? napi_weight : 0);
6491
6492 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
6493 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
6494 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
6495
6496 u64_stats_init(&vi->rq[i].stats.syncp);
6497 u64_stats_init(&vi->sq[i].stats.syncp);
6498 mutex_init(&vi->rq[i].dim_lock);
6499 }
6500
6501 return 0;
6502
6503 err_rq:
6504 kfree(vi->sq);
6505 err_sq:
6506 kfree(vi->ctrl);
6507 err_ctrl:
6508 return -ENOMEM;
6509 }
6510
6511 static int init_vqs(struct virtnet_info *vi)
6512 {
6513 int ret;
6514
6515 /* Allocate send & receive queues */
6516 ret = virtnet_alloc_queues(vi);
6517 if (ret)
6518 goto err;
6519
6520 ret = virtnet_find_vqs(vi);
6521 if (ret)
6522 goto err_free;
6523
6524 cpus_read_lock();
6525 virtnet_set_affinity(vi);
6526 cpus_read_unlock();
6527
6528 return 0;
6529
6530 err_free:
6531 virtnet_free_queues(vi);
6532 err:
6533 return ret;
6534 }
6535
6536 #ifdef CONFIG_SYSFS
6537 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
6538 char *buf)
6539 {
6540 struct virtnet_info *vi = netdev_priv(queue->dev);
6541 unsigned int queue_index = get_netdev_rx_queue_index(queue);
6542 unsigned int headroom = virtnet_get_headroom(vi);
6543 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
6544 struct ewma_pkt_len *avg;
6545
6546 BUG_ON(queue_index >= vi->max_queue_pairs);
6547 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
6548 return sprintf(buf, "%u\n",
6549 get_mergeable_buf_len(&vi->rq[queue_index], avg,
6550 SKB_DATA_ALIGN(headroom + tailroom)));
6551 }
6552
6553 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
6554 __ATTR_RO(mergeable_rx_buffer_size);
6555
6556 static struct attribute *virtio_net_mrg_rx_attrs[] = {
6557 &mergeable_rx_buffer_size_attribute.attr,
6558 NULL
6559 };
6560
6561 static const struct attribute_group virtio_net_mrg_rx_group = {
6562 .name = "virtio_net",
6563 .attrs = virtio_net_mrg_rx_attrs
6564 };
6565 #endif
6566
6567 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
6568 unsigned int fbit,
6569 const char *fname, const char *dname)
6570 {
6571 if (!virtio_has_feature(vdev, fbit))
6572 return false;
6573
6574 dev_err(&vdev->dev, "device advertises feature %s but not %s",
6575 fname, dname);
6576
6577 return true;
6578 }
6579
6580 #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
6581 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
6582
6583 static bool virtnet_validate_features(struct virtio_device *vdev)
6584 {
6585 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
6586 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
6587 "VIRTIO_NET_F_CTRL_VQ") ||
6588 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
6589 "VIRTIO_NET_F_CTRL_VQ") ||
6590 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
6591 "VIRTIO_NET_F_CTRL_VQ") ||
6592 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
6593 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
6594 "VIRTIO_NET_F_CTRL_VQ") ||
6595 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
6596 "VIRTIO_NET_F_CTRL_VQ") ||
6597 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
6598 "VIRTIO_NET_F_CTRL_VQ") ||
6599 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
6600 "VIRTIO_NET_F_CTRL_VQ") ||
6601 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
6602 "VIRTIO_NET_F_CTRL_VQ"))) {
6603 return false;
6604 }
6605
6606 return true;
6607 }
6608
6609 #define MIN_MTU ETH_MIN_MTU
6610 #define MAX_MTU ETH_MAX_MTU
6611
6612 static int virtnet_validate(struct virtio_device *vdev)
6613 {
6614 if (!vdev->config->get) {
6615 dev_err(&vdev->dev, "%s failure: config access disabled\n",
6616 __func__);
6617 return -EINVAL;
6618 }
6619
6620 if (!virtnet_validate_features(vdev))
6621 return -EINVAL;
6622
6623 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
6624 int mtu = virtio_cread16(vdev,
6625 offsetof(struct virtio_net_config,
6626 mtu));
6627 if (mtu < MIN_MTU)
6628 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
6629 }
6630
6631 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
6632 !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
6633 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
6634 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
6635 }
6636
6637 return 0;
6638 }
6639
6640 static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
6641 {
6642 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
6643 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
6644 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
6645 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
6646 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
6647 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
6648 }
6649
6650 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
6651 {
6652 bool guest_gso = virtnet_check_guest_gso(vi);
6653
6654 /* If the device can receive ANY guest GSO packets, regardless of mtu,
6655 * allocate buffers for maximum-size packets; otherwise limit them to
6656 * mtu-sized packets only.
6657 */
6658 if (mtu > ETH_DATA_LEN || guest_gso) {
6659 vi->big_packets = true;
6660 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
6661 }
6662 }
6663
6664 #define VIRTIO_NET_HASH_REPORT_MAX_TABLE 10
6665 static enum xdp_rss_hash_type
6666 virtnet_xdp_rss_type[VIRTIO_NET_HASH_REPORT_MAX_TABLE] = {
6667 [VIRTIO_NET_HASH_REPORT_NONE] = XDP_RSS_TYPE_NONE,
6668 [VIRTIO_NET_HASH_REPORT_IPv4] = XDP_RSS_TYPE_L3_IPV4,
6669 [VIRTIO_NET_HASH_REPORT_TCPv4] = XDP_RSS_TYPE_L4_IPV4_TCP,
6670 [VIRTIO_NET_HASH_REPORT_UDPv4] = XDP_RSS_TYPE_L4_IPV4_UDP,
6671 [VIRTIO_NET_HASH_REPORT_IPv6] = XDP_RSS_TYPE_L3_IPV6,
6672 [VIRTIO_NET_HASH_REPORT_TCPv6] = XDP_RSS_TYPE_L4_IPV6_TCP,
6673 [VIRTIO_NET_HASH_REPORT_UDPv6] = XDP_RSS_TYPE_L4_IPV6_UDP,
6674 [VIRTIO_NET_HASH_REPORT_IPv6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
6675 [VIRTIO_NET_HASH_REPORT_TCPv6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
6676 [VIRTIO_NET_HASH_REPORT_UDPv6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX
6677 };
6678
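/* XDP metadata hook: expose the hash the device reported in the
 * virtio_net_hdr_v1_hash header preceding the packet, translating the virtio
 * hash_report type into the generic XDP RSS hash type.
 */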
6679 static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
6680 enum xdp_rss_hash_type *rss_type)
6681 {
6682 const struct xdp_buff *xdp = (void *)_ctx;
6683 struct virtio_net_hdr_v1_hash *hdr_hash;
6684 struct virtnet_info *vi;
6685 u16 hash_report;
6686
6687 if (!(xdp->rxq->dev->features & NETIF_F_RXHASH))
6688 return -ENODATA;
6689
6690 vi = netdev_priv(xdp->rxq->dev);
6691 hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len);
6692 hash_report = __le16_to_cpu(hdr_hash->hash_report);
6693
6694 if (hash_report >= VIRTIO_NET_HASH_REPORT_MAX_TABLE)
6695 hash_report = VIRTIO_NET_HASH_REPORT_NONE;
6696
6697 *rss_type = virtnet_xdp_rss_type[hash_report];
6698 *hash = __le32_to_cpu(hdr_hash->hash_value);
6699 return 0;
6700 }
6701
6702 static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
6703 .xmo_rx_hash = virtnet_xdp_rx_hash,
6704 };
6705
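/* Device probe: read the negotiated features and config space, allocate the
 * netdev and virtqueues, register with the networking stack, and finish
 * device setup (RSS, MAC, stats capabilities, link state) under rtnl.
 */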
6706 static int virtnet_probe(struct virtio_device *vdev)
6707 {
6708 int i, err = -ENOMEM;
6709 struct net_device *dev;
6710 struct virtnet_info *vi;
6711 u16 max_queue_pairs;
6712 int mtu = 0;
6713
6714 /* Find if host supports multiqueue/rss virtio_net device */
6715 max_queue_pairs = 1;
6716 if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
6717 max_queue_pairs =
6718 virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
6719
6720 /* We need at least 2 queues */
6721 if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
6722 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
6723 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
6724 max_queue_pairs = 1;
6725
6726 /* Allocate ourselves a network device with room for our info */
6727 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
6728 if (!dev)
6729 return -ENOMEM;
6730
6731 /* Set up network device as normal. */
6732 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
6733 IFF_TX_SKB_NO_LINEAR;
6734 dev->netdev_ops = &virtnet_netdev;
6735 dev->stat_ops = &virtnet_stat_ops;
6736 dev->features = NETIF_F_HIGHDMA;
6737
6738 dev->ethtool_ops = &virtnet_ethtool_ops;
6739 SET_NETDEV_DEV(dev, &vdev->dev);
6740
6741 /* Do we support "hardware" checksums? */
6742 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
6743 /* This opens up the world of extra features. */
6744 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6745 if (csum)
6746 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6747
6748 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
6749 dev->hw_features |= NETIF_F_TSO
6750 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
6751 }
6752 /* Individual feature bits: what can host handle? */
6753 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
6754 dev->hw_features |= NETIF_F_TSO;
6755 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
6756 dev->hw_features |= NETIF_F_TSO6;
6757 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
6758 dev->hw_features |= NETIF_F_TSO_ECN;
6759 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
6760 dev->hw_features |= NETIF_F_GSO_UDP_L4;
6761
6762 dev->features |= NETIF_F_GSO_ROBUST;
6763
6764 if (gso)
6765 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
6766 /* (!csum && gso) case will be fixed by register_netdev() */
6767 }
6768
6769 /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
6770 * need to calculate checksums for partially checksummed packets,
6771 * as they're considered valid by the upper layer.
6772 * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
6773 * receives fully checksummed packets. The device may assist in
6774 * validating these packets' checksums, so the driver won't have to.
6775 */
6776 dev->features |= NETIF_F_RXCSUM;
6777
6778 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
6779 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
6780 dev->features |= NETIF_F_GRO_HW;
6781 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
6782 dev->hw_features |= NETIF_F_GRO_HW;
6783
6784 dev->vlan_features = dev->features;
6785 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
6786 NETDEV_XDP_ACT_XSK_ZEROCOPY;
6787
6788 /* MTU range: 68 - 65535 */
6789 dev->min_mtu = MIN_MTU;
6790 dev->max_mtu = MAX_MTU;
6791
6792 /* Configuration may specify what MAC to use. Otherwise random. */
6793 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
6794 u8 addr[ETH_ALEN];
6795
6796 virtio_cread_bytes(vdev,
6797 offsetof(struct virtio_net_config, mac),
6798 addr, ETH_ALEN);
6799 eth_hw_addr_set(dev, addr);
6800 } else {
6801 eth_hw_addr_random(dev);
6802 dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
6803 dev->dev_addr);
6804 }
6805
6806 /* Set up our device-specific information */
6807 vi = netdev_priv(dev);
6808 vi->dev = dev;
6809 vi->vdev = vdev;
6810 vdev->priv = vi;
6811
6812 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
6813 INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
6814 spin_lock_init(&vi->refill_lock);
6815
6816 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
6817 vi->mergeable_rx_bufs = true;
6818 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
6819 }
6820
6821 if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
6822 vi->has_rss_hash_report = true;
6823
6824 if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
6825 vi->has_rss = true;
6826
6827 vi->rss_indir_table_size =
6828 virtio_cread16(vdev, offsetof(struct virtio_net_config,
6829 rss_max_indirection_table_length));
6830 }
6831 vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
6832 if (!vi->rss_hdr) {
6833 err = -ENOMEM;
6834 goto free;
6835 }
6836
6837 if (vi->has_rss || vi->has_rss_hash_report) {
6838 vi->rss_key_size =
6839 virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
6840 if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
6841 dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n",
6842 vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE);
6843 err = -EINVAL;
6844 goto free;
6845 }
6846
6847 vi->rss_hash_types_supported =
6848 virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
6849 vi->rss_hash_types_supported &=
6850 ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
6851 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
6852 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
6853
6854 dev->hw_features |= NETIF_F_RXHASH;
6855 dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops;
6856 }
6857
6858 if (vi->has_rss_hash_report)
6859 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
6860 else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
6861 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
6862 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
6863 else
6864 vi->hdr_len = sizeof(struct virtio_net_hdr);
6865
6866 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
6867 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
6868 vi->any_header_sg = true;
6869
6870 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
6871 vi->has_cvq = true;
6872
6873 mutex_init(&vi->cvq_lock);
6874
6875 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
6876 mtu = virtio_cread16(vdev,
6877 offsetof(struct virtio_net_config,
6878 mtu));
6879 if (mtu < dev->min_mtu) {
6880 /* Should never trigger: MTU was previously validated
6881 * in virtnet_validate.
6882 */
6883 dev_err(&vdev->dev,
6884 "device MTU appears to have changed it is now %d < %d",
6885 mtu, dev->min_mtu);
6886 err = -EINVAL;
6887 goto free;
6888 }
6889
6890 dev->mtu = mtu;
6891 dev->max_mtu = mtu;
6892 }
6893
6894 virtnet_set_big_packets(vi, mtu);
6895
6896 if (vi->any_header_sg)
6897 dev->needed_headroom = vi->hdr_len;
6898
6899 /* Enable multiqueue by default */
6900 if (num_online_cpus() >= max_queue_pairs)
6901 vi->curr_queue_pairs = max_queue_pairs;
6902 else
6903 vi->curr_queue_pairs = num_online_cpus();
6904 vi->max_queue_pairs = max_queue_pairs;
6905
6906 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
6907 err = init_vqs(vi);
6908 if (err)
6909 goto free;
6910
6911 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
6912 vi->intr_coal_rx.max_usecs = 0;
6913 vi->intr_coal_tx.max_usecs = 0;
6914 vi->intr_coal_rx.max_packets = 0;
6915
6916 /* Keep the default values of the coalescing parameters
6917 * aligned with the default napi_tx state.
6918 */
6919 if (vi->sq[0].napi.weight)
6920 vi->intr_coal_tx.max_packets = 1;
6921 else
6922 vi->intr_coal_tx.max_packets = 0;
6923 }
6924
6925 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
6926 /* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
6927 for (i = 0; i < vi->max_queue_pairs; i++)
6928 if (vi->sq[i].napi.weight)
6929 vi->sq[i].intr_coal.max_packets = 1;
6930
6931 err = virtnet_init_irq_moder(vi);
6932 if (err)
6933 goto free;
6934 }
6935
6936 #ifdef CONFIG_SYSFS
6937 if (vi->mergeable_rx_bufs)
6938 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
6939 #endif
6940 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
6941 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
6942
6943 virtnet_init_settings(dev);
6944
6945 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
6946 vi->failover = net_failover_create(vi->dev);
6947 if (IS_ERR(vi->failover)) {
6948 err = PTR_ERR(vi->failover);
6949 goto free_vqs;
6950 }
6951 }
6952
6953 if (vi->has_rss || vi->has_rss_hash_report)
6954 virtnet_init_default_rss(vi);
6955
6956 enable_rx_mode_work(vi);
6957
6958 /* serialize netdev register + virtio_device_ready() with ndo_open() */
6959 rtnl_lock();
6960
6961 err = register_netdevice(dev);
6962 if (err) {
6963 pr_debug("virtio_net: registering device failed\n");
6964 rtnl_unlock();
6965 goto free_failover;
6966 }
6967
6968 /* Disable config change notification until ndo_open. */
6969 virtio_config_driver_disable(vi->vdev);
6970
6971 virtio_device_ready(vdev);
6972
6973 if (vi->has_rss || vi->has_rss_hash_report) {
6974 if (!virtnet_commit_rss_command(vi)) {
6975 dev_warn(&vdev->dev, "RSS disabled because committing failed.\n");
6976 dev->hw_features &= ~NETIF_F_RXHASH;
6977 vi->has_rss_hash_report = false;
6978 vi->has_rss = false;
6979 }
6980 }
6981
6982 virtnet_set_queues(vi, vi->curr_queue_pairs);
6983
6984 /* A random MAC address has been assigned; notify the device.
6985 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
6986 * because many devices work fine without the MAC being set explicitly.
6987 */
6988 if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
6989 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
6990 struct scatterlist sg;
6991
6992 sg_init_one(&sg, dev->dev_addr, dev->addr_len);
6993 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
6994 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
6995 pr_debug("virtio_net: setting MAC address failed\n");
6996 rtnl_unlock();
6997 err = -EINVAL;
6998 goto free_unregister_netdev;
6999 }
7000 }
7001
7002 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) {
7003 struct virtio_net_stats_capabilities *stats_cap __free(kfree) = NULL;
7004 struct scatterlist sg;
7005 __le64 v;
7006
7007 stats_cap = kzalloc(sizeof(*stats_cap), GFP_KERNEL);
7008 if (!stats_cap) {
7009 rtnl_unlock();
7010 err = -ENOMEM;
7011 goto free_unregister_netdev;
7012 }
7013
7014 sg_init_one(&sg, stats_cap, sizeof(*stats_cap));
7015
7016 if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
7017 VIRTIO_NET_CTRL_STATS_QUERY,
7018 NULL, &sg)) {
7019 pr_debug("virtio_net: fail to get stats capability\n");
7020 rtnl_unlock();
7021 err = -EINVAL;
7022 goto free_unregister_netdev;
7023 }
7024
7025 v = stats_cap->supported_stats_types[0];
7026 vi->device_stats_cap = le64_to_cpu(v);
7027 }
7028
7029 /* Assume link up if device can't report link status,
7030 otherwise get link status from config. */
7031 netif_carrier_off(dev);
7032 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
7033 virtnet_config_changed_work(&vi->config_work);
7034 } else {
7035 vi->status = VIRTIO_NET_S_LINK_UP;
7036 virtnet_update_settings(vi);
7037 netif_carrier_on(dev);
7038 }
7039
7040 for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
7041 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
7042 set_bit(guest_offloads[i], &vi->guest_offloads);
7043 vi->guest_offloads_capable = vi->guest_offloads;
7044
7045 rtnl_unlock();
7046
7047 err = virtnet_cpu_notif_add(vi);
7048 if (err) {
7049 pr_debug("virtio_net: registering cpu notifier failed\n");
7050 goto free_unregister_netdev;
7051 }
7052
7053 pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
7054 dev->name, max_queue_pairs);
7055
7056 return 0;
7057
7058 free_unregister_netdev:
7059 unregister_netdev(dev);
7060 free_failover:
7061 net_failover_destroy(vi->failover);
7062 free_vqs:
7063 virtio_reset_device(vdev);
7064 cancel_delayed_work_sync(&vi->refill);
7065 free_receive_page_frags(vi);
7066 virtnet_del_vqs(vi);
7067 free:
7068 free_netdev(dev);
7069 return err;
7070 }
7071
7072 static void remove_vq_common(struct virtnet_info *vi)
7073 {
7074 int i;
7075
7076 virtio_reset_device(vi->vdev);
7077
7078 /* Free unused buffers in both send and recv, if any. */
7079 free_unused_bufs(vi);
7080
7081 /*
7082 * Rule of thumb is netdev_tx_reset_queue() should follow any
7083 * skb freeing not followed by netdev_tx_completed_queue()
7084 */
7085 for (i = 0; i < vi->max_queue_pairs; i++)
7086 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
7087
7088 free_receive_bufs(vi);
7089
7090 free_receive_page_frags(vi);
7091
7092 virtnet_del_vqs(vi);
7093 }
7094
7095 static void virtnet_remove(struct virtio_device *vdev)
7096 {
7097 struct virtnet_info *vi = vdev->priv;
7098
7099 virtnet_cpu_notif_remove(vi);
7100
7101 /* Make sure no work handler is accessing the device. */
7102 flush_work(&vi->config_work);
7103 disable_rx_mode_work(vi);
7104 flush_work(&vi->rx_mode_work);
7105
7106 virtnet_free_irq_moder(vi);
7107
7108 unregister_netdev(vi->dev);
7109
7110 net_failover_destroy(vi->failover);
7111
7112 remove_vq_common(vi);
7113
7114 free_netdev(vi->dev);
7115 }
7116
7117 static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
7118 {
7119 struct virtnet_info *vi = vdev->priv;
7120
7121 virtnet_cpu_notif_remove(vi);
7122 virtnet_freeze_down(vdev);
7123 remove_vq_common(vi);
7124
7125 return 0;
7126 }
7127
7128 static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
7129 {
7130 struct virtnet_info *vi = vdev->priv;
7131 int err;
7132
7133 err = virtnet_restore_up(vdev);
7134 if (err)
7135 return err;
7136 virtnet_set_queues(vi, vi->curr_queue_pairs);
7137
7138 err = virtnet_cpu_notif_add(vi);
7139 if (err) {
7140 virtnet_freeze_down(vdev);
7141 remove_vq_common(vi);
7142 return err;
7143 }
7144
7145 return 0;
7146 }
7147
7148 static struct virtio_device_id id_table[] = {
7149 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
7150 { 0 },
7151 };
7152
7153 #define VIRTNET_FEATURES \
7154 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
7155 VIRTIO_NET_F_MAC, \
7156 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
7157 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
7158 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
7159 VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
7160 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
7161 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
7162 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
7163 VIRTIO_NET_F_CTRL_MAC_ADDR, \
7164 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
7165 VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
7166 VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
7167 VIRTIO_NET_F_VQ_NOTF_COAL, \
7168 VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
7169
7170 static unsigned int features[] = {
7171 VIRTNET_FEATURES,
7172 };
7173
7174 static unsigned int features_legacy[] = {
7175 VIRTNET_FEATURES,
7176 VIRTIO_NET_F_GSO,
7177 VIRTIO_F_ANY_LAYOUT,
7178 };
7179
7180 static struct virtio_driver virtio_net_driver = {
7181 .feature_table = features,
7182 .feature_table_size = ARRAY_SIZE(features),
7183 .feature_table_legacy = features_legacy,
7184 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
7185 .driver.name = KBUILD_MODNAME,
7186 .id_table = id_table,
7187 .validate = virtnet_validate,
7188 .probe = virtnet_probe,
7189 .remove = virtnet_remove,
7190 .config_changed = virtnet_config_changed,
7191 #ifdef CONFIG_PM_SLEEP
7192 .freeze = virtnet_freeze,
7193 .restore = virtnet_restore,
7194 #endif
7195 };
7196
7197 static __init int virtio_net_driver_init(void)
7198 {
7199 int ret;
7200
7201 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
7202 virtnet_cpu_online,
7203 virtnet_cpu_down_prep);
7204 if (ret < 0)
7205 goto out;
7206 virtionet_online = ret;
7207 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
7208 NULL, virtnet_cpu_dead);
7209 if (ret)
7210 goto err_dead;
7211 ret = register_virtio_driver(&virtio_net_driver);
7212 if (ret)
7213 goto err_virtio;
7214 return 0;
7215 err_virtio:
7216 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
7217 err_dead:
7218 cpuhp_remove_multi_state(virtionet_online);
7219 out:
7220 return ret;
7221 }
7222 module_init(virtio_net_driver_init);
7223
7224 static __exit void virtio_net_driver_exit(void)
7225 {
7226 unregister_virtio_driver(&virtio_net_driver);
7227 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
7228 cpuhp_remove_multi_state(virtionet_online);
7229 }
7230 module_exit(virtio_net_driver_exit);
7231
7232 MODULE_DEVICE_TABLE(virtio, id_table);
7233 MODULE_DESCRIPTION("Virtio network driver");
7234 MODULE_LICENSE("GPL");
7235