1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* A network driver using virtio.
3 *
4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5 */
6 //#define DEBUG
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/module.h>
11 #include <linux/virtio.h>
12 #include <linux/virtio_net.h>
13 #include <linux/bpf.h>
14 #include <linux/bpf_trace.h>
15 #include <linux/scatterlist.h>
16 #include <linux/if_vlan.h>
17 #include <linux/slab.h>
18 #include <linux/cpu.h>
19 #include <linux/average.h>
20 #include <linux/filter.h>
21 #include <linux/kernel.h>
22 #include <linux/dim.h>
23 #include <net/route.h>
24 #include <net/xdp.h>
25 #include <net/net_failover.h>
26 #include <net/netdev_rx_queue.h>
27 #include <net/netdev_queues.h>
28 #include <net/xdp_sock_drv.h>
29
30 static int napi_weight = NAPI_POLL_WEIGHT;
31 module_param(napi_weight, int, 0444);
32
33 static bool csum = true, gso = true, napi_tx = true;
34 module_param(csum, bool, 0444);
35 module_param(gso, bool, 0444);
36 module_param(napi_tx, bool, 0644);
37
38 /* FIXME: MTU in config. */
39 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
40 #define GOOD_COPY_LEN 128
41
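/* Padding reserved in front of the virtio-net header in small rx buffers
 * (see receive_small_build_skb() and receive_small()).
 */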
42 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
43
44 /* Flags distinguishing the two types of XDP xmit (TX vs. REDIRECT) */
45 #define VIRTIO_XDP_TX BIT(0)
46 #define VIRTIO_XDP_REDIR BIT(1)
47
48 /* RX packet size EWMA. The average packet size is used to determine the packet
49 * buffer size when refilling RX rings. As the entire RX ring may be refilled
50 * at once, the weight is chosen so that the EWMA will be insensitive to short-
51 * term, transient changes in packet size.
52 */
53 DECLARE_EWMA(pkt_len, 0, 64)
54
55 #define VIRTNET_DRIVER_VERSION "1.0.0"
56
57 static const unsigned long guest_offloads[] = {
58 VIRTIO_NET_F_GUEST_TSO4,
59 VIRTIO_NET_F_GUEST_TSO6,
60 VIRTIO_NET_F_GUEST_ECN,
61 VIRTIO_NET_F_GUEST_UFO,
62 VIRTIO_NET_F_GUEST_CSUM,
63 VIRTIO_NET_F_GUEST_USO4,
64 VIRTIO_NET_F_GUEST_USO6,
65 VIRTIO_NET_F_GUEST_HDRLEN
66 };
67
68 #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
69 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
70 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
71 (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
72 (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
73 (1ULL << VIRTIO_NET_F_GUEST_USO6))
74
75 struct virtnet_stat_desc {
76 char desc[ETH_GSTRING_LEN];
77 size_t offset;
78 size_t qstat_offset;
79 };
80
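/* Per-call accounting used when reclaiming completed tx buffers: non-orphaned
 * skbs are counted in napi_packets/napi_bytes (and reported to BQL via
 * netdev_tx_completed_queue()), orphaned skbs and XDP frames in packets/bytes,
 * and xsk counts completed XSK tx buffers (see __free_old_xmit()).
 */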
81 struct virtnet_sq_free_stats {
82 u64 packets;
83 u64 bytes;
84 u64 napi_packets;
85 u64 napi_bytes;
86 u64 xsk;
87 };
88
89 struct virtnet_sq_stats {
90 struct u64_stats_sync syncp;
91 u64_stats_t packets;
92 u64_stats_t bytes;
93 u64_stats_t xdp_tx;
94 u64_stats_t xdp_tx_drops;
95 u64_stats_t kicks;
96 u64_stats_t tx_timeouts;
97 u64_stats_t stop;
98 u64_stats_t wake;
99 };
100
101 struct virtnet_rq_stats {
102 struct u64_stats_sync syncp;
103 u64_stats_t packets;
104 u64_stats_t bytes;
105 u64_stats_t drops;
106 u64_stats_t xdp_packets;
107 u64_stats_t xdp_tx;
108 u64_stats_t xdp_redirects;
109 u64_stats_t xdp_drops;
110 u64_stats_t kicks;
111 };
112
113 #define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
114 #define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
115
116 #define VIRTNET_SQ_STAT_QSTAT(name, m) \
117 { \
118 name, \
119 offsetof(struct virtnet_sq_stats, m), \
120 offsetof(struct netdev_queue_stats_tx, m), \
121 }
122
123 #define VIRTNET_RQ_STAT_QSTAT(name, m) \
124 { \
125 name, \
126 offsetof(struct virtnet_rq_stats, m), \
127 offsetof(struct netdev_queue_stats_rx, m), \
128 }
129
130 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
131 VIRTNET_SQ_STAT("xdp_tx", xdp_tx),
132 VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
133 VIRTNET_SQ_STAT("kicks", kicks),
134 VIRTNET_SQ_STAT("tx_timeouts", tx_timeouts),
135 };
136
137 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
138 VIRTNET_RQ_STAT("drops", drops),
139 VIRTNET_RQ_STAT("xdp_packets", xdp_packets),
140 VIRTNET_RQ_STAT("xdp_tx", xdp_tx),
141 VIRTNET_RQ_STAT("xdp_redirects", xdp_redirects),
142 VIRTNET_RQ_STAT("xdp_drops", xdp_drops),
143 VIRTNET_RQ_STAT("kicks", kicks),
144 };
145
146 static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
147 VIRTNET_SQ_STAT_QSTAT("packets", packets),
148 VIRTNET_SQ_STAT_QSTAT("bytes", bytes),
149 VIRTNET_SQ_STAT_QSTAT("stop", stop),
150 VIRTNET_SQ_STAT_QSTAT("wake", wake),
151 };
152
153 static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
154 VIRTNET_RQ_STAT_QSTAT("packets", packets),
155 VIRTNET_RQ_STAT_QSTAT("bytes", bytes),
156 };
157
158 #define VIRTNET_STATS_DESC_CQ(name) \
159 {#name, offsetof(struct virtio_net_stats_cvq, name), -1}
160
161 #define VIRTNET_STATS_DESC_RX(class, name) \
162 {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
163
164 #define VIRTNET_STATS_DESC_TX(class, name) \
165 {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
166
167
168 static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
169 VIRTNET_STATS_DESC_CQ(command_num),
170 VIRTNET_STATS_DESC_CQ(ok_num),
171 };
172
173 static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
174 VIRTNET_STATS_DESC_RX(basic, packets),
175 VIRTNET_STATS_DESC_RX(basic, bytes),
176
177 VIRTNET_STATS_DESC_RX(basic, notifications),
178 VIRTNET_STATS_DESC_RX(basic, interrupts),
179 };
180
181 static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
182 VIRTNET_STATS_DESC_TX(basic, packets),
183 VIRTNET_STATS_DESC_TX(basic, bytes),
184
185 VIRTNET_STATS_DESC_TX(basic, notifications),
186 VIRTNET_STATS_DESC_TX(basic, interrupts),
187 };
188
189 static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
190 VIRTNET_STATS_DESC_RX(csum, needs_csum),
191 };
192
193 static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
194 VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
195 VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
196 };
197
198 static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
199 VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
200 };
201
202 static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
203 VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
204 };
205
206 #define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field) \
207 { \
208 #name, \
209 offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), \
210 offsetof(struct netdev_queue_stats_rx, qstat_field), \
211 }
212
213 #define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field) \
214 { \
215 #name, \
216 offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), \
217 offsetof(struct netdev_queue_stats_tx, qstat_field), \
218 }
219
220 static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] = {
221 VIRTNET_STATS_DESC_RX_QSTAT(basic, drops, hw_drops),
222 VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns),
223 };
224
225 static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] = {
226 VIRTNET_STATS_DESC_TX_QSTAT(basic, drops, hw_drops),
227 VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors),
228 };
229
230 static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] = {
231 VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
232 VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none, csum_none),
233 VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad, csum_bad),
234 };
235
236 static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] = {
237 VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none, csum_none),
238 VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
239 };
240
241 static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] = {
242 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets, hw_gro_packets),
243 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes, hw_gro_bytes),
244 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
245 VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced, hw_gro_wire_bytes),
246 };
247
248 static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] = {
249 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets, hw_gso_packets),
250 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes, hw_gso_bytes),
251 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments, hw_gso_wire_packets),
252 VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
253 };
254
255 static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] = {
256 VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
257 };
258
259 static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
260 VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
261 };
262
263 #define VIRTNET_Q_TYPE_RX 0
264 #define VIRTNET_Q_TYPE_TX 1
265 #define VIRTNET_Q_TYPE_CQ 2
266
267 struct virtnet_interrupt_coalesce {
268 u32 max_packets;
269 u32 max_usecs;
270 };
271
272 /* The DMA information for the pages allocated at one time. */
273 struct virtnet_rq_dma {
274 dma_addr_t addr;
275 u32 ref;
276 u16 len;
277 u16 need_sync;
278 };
279
280 /* Internal representation of a send virtqueue */
281 struct send_queue {
282 /* Virtqueue associated with this send_queue */
283 struct virtqueue *vq;
284
285 /* TX: fragments + linear part + virtio header */
286 struct scatterlist sg[MAX_SKB_FRAGS + 2];
287
288 /* Name of the send queue: output.$index */
289 char name[16];
290
291 struct virtnet_sq_stats stats;
292
293 struct virtnet_interrupt_coalesce intr_coal;
294
295 struct napi_struct napi;
296
297 /* Record whether sq is in reset state. */
298 bool reset;
299
300 struct xsk_buff_pool *xsk_pool;
301
302 dma_addr_t xsk_hdr_dma_addr;
303 };
304
305 /* Internal representation of a receive virtqueue */
306 struct receive_queue {
307 /* Virtqueue associated with this receive_queue */
308 struct virtqueue *vq;
309
310 struct napi_struct napi;
311
312 struct bpf_prog __rcu *xdp_prog;
313
314 struct virtnet_rq_stats stats;
315
316 /* The number of rx notifications */
317 u16 calls;
318
319 /* Is dynamic interrupt moderation enabled? */
320 bool dim_enabled;
321
322 /* Used to protect dim_enabled and inter_coal */
323 struct mutex dim_lock;
324
325 /* Dynamic Interrupt Moderation */
326 struct dim dim;
327
328 u32 packets_in_napi;
329
330 struct virtnet_interrupt_coalesce intr_coal;
331
332 /* Chain pages by the private ptr. */
333 struct page *pages;
334
335 /* Average packet length for mergeable receive buffers. */
336 struct ewma_pkt_len mrg_avg_pkt_len;
337
338 /* Page frag for packet buffer allocation. */
339 struct page_frag alloc_frag;
340
341 /* RX: fragments + linear part + virtio header */
342 struct scatterlist sg[MAX_SKB_FRAGS + 2];
343
344 /* Min single buffer size for mergeable buffers case. */
345 unsigned int min_buf_len;
346
347 /* Name of this receive queue: input.$index */
348 char name[16];
349
350 struct xdp_rxq_info xdp_rxq;
351
352 /* Record the last dma info to free after a new page is allocated. */
353 struct virtnet_rq_dma *last_dma;
354
355 struct xsk_buff_pool *xsk_pool;
356
357 /* xdp rxq used by xsk */
358 struct xdp_rxq_info xsk_rxq_info;
359
360 struct xdp_buff **xsk_buffs;
361 };
362
363 #define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
364
365 /* Control VQ buffers: protected by the rtnl lock */
366 struct control_buf {
367 struct virtio_net_ctrl_hdr hdr;
368 virtio_net_ctrl_ack status;
369 };
370
371 struct virtnet_info {
372 struct virtio_device *vdev;
373 struct virtqueue *cvq;
374 struct net_device *dev;
375 struct send_queue *sq;
376 struct receive_queue *rq;
377 unsigned int status;
378
379 /* Max # of queue pairs supported by the device */
380 u16 max_queue_pairs;
381
382 /* # of queue pairs currently used by the driver */
383 u16 curr_queue_pairs;
384
385 /* # of XDP queue pairs currently used by the driver */
386 u16 xdp_queue_pairs;
387
388 /* xdp_queue_pairs may be 0 even when XDP is loaded, so track enablement here. */
389 bool xdp_enabled;
390
391 /* I like... big packets and I cannot lie! */
392 bool big_packets;
393
394 /* number of sg entries allocated for big packets */
395 unsigned int big_packets_num_skbfrags;
396
397 /* Host will merge rx buffers for big packets (shake it! shake it!) */
398 bool mergeable_rx_bufs;
399
400 /* Host supports rss and/or hash report */
401 bool has_rss;
402 bool has_rss_hash_report;
403 u8 rss_key_size;
404 u16 rss_indir_table_size;
405 u32 rss_hash_types_supported;
406 u32 rss_hash_types_saved;
407 struct virtio_net_rss_config_hdr *rss_hdr;
408 struct virtio_net_rss_config_trailer rss_trailer;
409 u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
410
411 /* Has control virtqueue */
412 bool has_cvq;
413
414 /* Lock to protect the control VQ */
415 struct mutex cvq_lock;
416
417 /* Host can handle any s/g split between our header and packet data */
418 bool any_header_sg;
419
420 /* Packet virtio header size */
421 u8 hdr_len;
422
423 /* Work struct for delayed refilling if we run low on memory. */
424 struct delayed_work refill;
425
426 /* Is delayed refill enabled? */
427 bool refill_enabled;
428
429 /* The lock to synchronize the access to refill_enabled */
430 spinlock_t refill_lock;
431
432 /* Work struct for config space updates */
433 struct work_struct config_work;
434
435 /* Work struct for setting rx mode */
436 struct work_struct rx_mode_work;
437
438 /* OK to queue work setting RX mode? */
439 bool rx_mode_work_enabled;
440
441 /* Is the affinity hint set for virtqueues? */
442 bool affinity_hint_set;
443
444 /* CPU hotplug instances for online & dead */
445 struct hlist_node node;
446 struct hlist_node node_dead;
447
448 struct control_buf *ctrl;
449
450 /* Ethtool settings */
451 u8 duplex;
452 u32 speed;
453
454 /* Is rx dynamic interrupt moderation enabled? */
455 bool rx_dim_enabled;
456
457 /* Interrupt coalescing settings */
458 struct virtnet_interrupt_coalesce intr_coal_tx;
459 struct virtnet_interrupt_coalesce intr_coal_rx;
460
461 unsigned long guest_offloads;
462 unsigned long guest_offloads_capable;
463
464 /* failover when STANDBY feature enabled */
465 struct failover *failover;
466
467 u64 device_stats_cap;
468 };
469
470 struct padded_vnet_hdr {
471 struct virtio_net_hdr_v1_hash hdr;
472 /*
473 * hdr is in a separate sg buffer, and the data sg buffer shares the same
474 * page with this header sg. This padding makes the next sg 16-byte aligned
475 * after the header.
476 */
477 char padding[12];
478 };
479
480 struct virtio_net_common_hdr {
481 union {
482 struct virtio_net_hdr hdr;
483 struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
484 struct virtio_net_hdr_v1_hash hash_v1_hdr;
485 };
486 };
487
488 static struct virtio_net_common_hdr xsk_hdr;
489
490 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
491 static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
492 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
493 struct net_device *dev,
494 unsigned int *xdp_xmit,
495 struct virtnet_rq_stats *stats);
496 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
497 struct sk_buff *skb, u8 flags);
498 static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
499 struct sk_buff *curr_skb,
500 struct page *page, void *buf,
501 int len, int truesize);
502 static void virtnet_xsk_completed(struct send_queue *sq, int num);
503
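/* Each buffer queued on a send virtqueue is tagged with one of these types in
 * the low bits of its token so completion handling knows how to free it.
 */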
504 enum virtnet_xmit_type {
505 VIRTNET_XMIT_TYPE_SKB,
506 VIRTNET_XMIT_TYPE_SKB_ORPHAN,
507 VIRTNET_XMIT_TYPE_XDP,
508 VIRTNET_XMIT_TYPE_XSK,
509 };
510
511 static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi)
512 {
513 u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1;
514
515 return struct_size(vi->rss_hdr, indirection_table, indir_table_size);
516 }
517
518 static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi)
519 {
520 return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size);
521 }
522
523 /* We use the last two bits of the pointer to distinguish the xmit type. */
524 #define VIRTNET_XMIT_TYPE_MASK (BIT(0) | BIT(1))
525
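/* For VIRTNET_XMIT_TYPE_XSK the queued token is not a pointer at all: the
 * buffer length is stored above the two type bits (see virtnet_xsk_to_ptr()
 * and virtnet_ptr_to_xsk_buff_len()).
 */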
526 #define VIRTIO_XSK_FLAG_OFFSET 2
527
528 static enum virtnet_xmit_type virtnet_xmit_ptr_unpack(void **ptr)
529 {
530 unsigned long p = (unsigned long)*ptr;
531
532 *ptr = (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);
533
534 return p & VIRTNET_XMIT_TYPE_MASK;
535 }
536
537 static void *virtnet_xmit_ptr_pack(void *ptr, enum virtnet_xmit_type type)
538 {
539 return (void *)((unsigned long)ptr | type);
540 }
541
542 static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
543 enum virtnet_xmit_type type)
544 {
545 return virtqueue_add_outbuf(sq->vq, sq->sg, num,
546 virtnet_xmit_ptr_pack(data, type),
547 GFP_ATOMIC);
548 }
549
550 static u32 virtnet_ptr_to_xsk_buff_len(void *ptr)
551 {
552 return ((unsigned long)ptr) >> VIRTIO_XSK_FLAG_OFFSET;
553 }
554
555 static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
556 {
557 sg_dma_address(sg) = addr;
558 sg_dma_len(sg) = len;
559 }
560
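/* Drain completed buffers from the send virtqueue, dispatching on the xmit
 * type packed into the low bits of each token.
 */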
561 static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
562 bool in_napi, struct virtnet_sq_free_stats *stats)
563 {
564 struct xdp_frame *frame;
565 struct sk_buff *skb;
566 unsigned int len;
567 void *ptr;
568
569 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
570 switch (virtnet_xmit_ptr_unpack(&ptr)) {
571 case VIRTNET_XMIT_TYPE_SKB:
572 skb = ptr;
573
574 pr_debug("Sent skb %p\n", skb);
575 stats->napi_packets++;
576 stats->napi_bytes += skb->len;
577 napi_consume_skb(skb, in_napi);
578 break;
579
580 case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
581 skb = ptr;
582
583 stats->packets++;
584 stats->bytes += skb->len;
585 napi_consume_skb(skb, in_napi);
586 break;
587
588 case VIRTNET_XMIT_TYPE_XDP:
589 frame = ptr;
590
591 stats->packets++;
592 stats->bytes += xdp_get_frame_len(frame);
593 xdp_return_frame(frame);
594 break;
595
596 case VIRTNET_XMIT_TYPE_XSK:
597 stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr);
598 stats->xsk++;
599 break;
600 }
601 }
602 netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
603 }
604
605 static void virtnet_free_old_xmit(struct send_queue *sq,
606 struct netdev_queue *txq,
607 bool in_napi,
608 struct virtnet_sq_free_stats *stats)
609 {
610 __free_old_xmit(sq, txq, in_napi, stats);
611
612 if (stats->xsk)
613 virtnet_xsk_completed(sq, stats->xsk);
614 }
615
616 /* Converting between virtqueue no. and kernel tx/rx queue no.
617 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
618 */
619 static int vq2txq(struct virtqueue *vq)
620 {
621 return (vq->index - 1) / 2;
622 }
623
624 static int txq2vq(int txq)
625 {
626 return txq * 2 + 1;
627 }
628
629 static int vq2rxq(struct virtqueue *vq)
630 {
631 return vq->index / 2;
632 }
633
634 static int rxq2vq(int rxq)
635 {
636 return rxq * 2;
637 }
638
639 static int vq_type(struct virtnet_info *vi, int qid)
640 {
641 if (qid == vi->max_queue_pairs * 2)
642 return VIRTNET_Q_TYPE_CQ;
643
644 if (qid % 2)
645 return VIRTNET_Q_TYPE_TX;
646
647 return VIRTNET_Q_TYPE_RX;
648 }
649
650 static inline struct virtio_net_common_hdr *
651 skb_vnet_common_hdr(struct sk_buff *skb)
652 {
653 return (struct virtio_net_common_hdr *)skb->cb;
654 }
655
656 /*
657 * page->private is used to chain pages for big packets; put the whole
658 * most-recently-used list at the beginning for reuse
659 */
660 static void give_pages(struct receive_queue *rq, struct page *page)
661 {
662 struct page *end;
663
664 /* Find end of list, sew whole thing into vi->rq.pages. */
665 for (end = page; end->private; end = (struct page *)end->private);
666 end->private = (unsigned long)rq->pages;
667 rq->pages = page;
668 }
669
670 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
671 {
672 struct page *p = rq->pages;
673
674 if (p) {
675 rq->pages = (struct page *)p->private;
676 /* clear private here, it is used to chain pages */
677 p->private = 0;
678 } else
679 p = alloc_page(gfp_mask);
680 return p;
681 }
682
683 static void virtnet_rq_free_buf(struct virtnet_info *vi,
684 struct receive_queue *rq, void *buf)
685 {
686 if (vi->mergeable_rx_bufs)
687 put_page(virt_to_head_page(buf));
688 else if (vi->big_packets)
689 give_pages(rq, buf);
690 else
691 put_page(virt_to_head_page(buf));
692 }
693
694 static void enable_delayed_refill(struct virtnet_info *vi)
695 {
696 spin_lock_bh(&vi->refill_lock);
697 vi->refill_enabled = true;
698 spin_unlock_bh(&vi->refill_lock);
699 }
700
701 static void disable_delayed_refill(struct virtnet_info *vi)
702 {
703 spin_lock_bh(&vi->refill_lock);
704 vi->refill_enabled = false;
705 spin_unlock_bh(&vi->refill_lock);
706 }
707
708 static void enable_rx_mode_work(struct virtnet_info *vi)
709 {
710 rtnl_lock();
711 vi->rx_mode_work_enabled = true;
712 rtnl_unlock();
713 }
714
715 static void disable_rx_mode_work(struct virtnet_info *vi)
716 {
717 rtnl_lock();
718 vi->rx_mode_work_enabled = false;
719 rtnl_unlock();
720 }
721
722 static void virtqueue_napi_schedule(struct napi_struct *napi,
723 struct virtqueue *vq)
724 {
725 if (napi_schedule_prep(napi)) {
726 virtqueue_disable_cb(vq);
727 __napi_schedule(napi);
728 }
729 }
730
731 static bool virtqueue_napi_complete(struct napi_struct *napi,
732 struct virtqueue *vq, int processed)
733 {
734 int opaque;
735
736 opaque = virtqueue_enable_cb_prepare(vq);
737 if (napi_complete_done(napi, processed)) {
738 if (unlikely(virtqueue_poll(vq, opaque)))
739 virtqueue_napi_schedule(napi, vq);
740 else
741 return true;
742 } else {
743 virtqueue_disable_cb(vq);
744 }
745
746 return false;
747 }
748
749 static void skb_xmit_done(struct virtqueue *vq)
750 {
751 struct virtnet_info *vi = vq->vdev->priv;
752 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
753
754 /* Suppress further interrupts. */
755 virtqueue_disable_cb(vq);
756
757 if (napi->weight)
758 virtqueue_napi_schedule(napi, vq);
759 else
760 /* We were probably waiting for more output buffers. */
761 netif_wake_subqueue(vi->dev, vq2txq(vq));
762 }
763
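/* For mergeable rx buffers, the per-buffer context packs the headroom in the
 * bits above MRG_CTX_HEADER_SHIFT and the truesize in the bits below it.
 */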
764 #define MRG_CTX_HEADER_SHIFT 22
765 static void *mergeable_len_to_ctx(unsigned int truesize,
766 unsigned int headroom)
767 {
768 return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
769 }
770
771 static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
772 {
773 return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
774 }
775
776 static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
777 {
778 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
779 }
780
781 static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
782 unsigned int headroom,
783 unsigned int len)
784 {
785 struct sk_buff *skb;
786
787 skb = build_skb(buf, buflen);
788 if (unlikely(!skb))
789 return NULL;
790
791 skb_reserve(skb, headroom);
792 skb_put(skb, len);
793
794 return skb;
795 }
796
797 /* Called from bottom half context */
798 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
799 struct receive_queue *rq,
800 struct page *page, unsigned int offset,
801 unsigned int len, unsigned int truesize,
802 unsigned int headroom)
803 {
804 struct sk_buff *skb;
805 struct virtio_net_common_hdr *hdr;
806 unsigned int copy, hdr_len, hdr_padded_len;
807 struct page *page_to_free = NULL;
808 int tailroom, shinfo_size;
809 char *p, *hdr_p, *buf;
810
811 p = page_address(page) + offset;
812 hdr_p = p;
813
814 hdr_len = vi->hdr_len;
815 if (vi->mergeable_rx_bufs)
816 hdr_padded_len = hdr_len;
817 else
818 hdr_padded_len = sizeof(struct padded_vnet_hdr);
819
820 buf = p - headroom;
821 len -= hdr_len;
822 offset += hdr_padded_len;
823 p += hdr_padded_len;
824 tailroom = truesize - headroom - hdr_padded_len - len;
825
826 shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
827
828 if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
829 skb = virtnet_build_skb(buf, truesize, p - buf, len);
830 if (unlikely(!skb))
831 return NULL;
832
833 page = (struct page *)page->private;
834 if (page)
835 give_pages(rq, page);
836 goto ok;
837 }
838
839 /* copy small packet so we can reuse these pages for small data */
840 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
841 if (unlikely(!skb))
842 return NULL;
843
844 /* Copy the whole frame if it fits in skb->head; otherwise
845 * let virtio_net_hdr_to_skb() and GRO pull headers as needed.
846 */
847 if (len <= skb_tailroom(skb))
848 copy = len;
849 else
850 copy = ETH_HLEN;
851 skb_put_data(skb, p, copy);
852
853 len -= copy;
854 offset += copy;
855
856 if (vi->mergeable_rx_bufs) {
857 if (len)
858 skb_add_rx_frag(skb, 0, page, offset, len, truesize);
859 else
860 page_to_free = page;
861 goto ok;
862 }
863
864 /*
865 * Verify that we can indeed put this data into a skb.
866 * This is here to handle cases when the device erroneously
867 * tries to receive more than is possible. This is usually
868 * the case of a broken device.
869 */
870 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
871 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
872 dev_kfree_skb(skb);
873 return NULL;
874 }
875 BUG_ON(offset >= PAGE_SIZE);
876 while (len) {
877 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
878 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
879 frag_size, truesize);
880 len -= frag_size;
881 page = (struct page *)page->private;
882 offset = 0;
883 }
884
885 if (page)
886 give_pages(rq, page);
887
888 ok:
889 hdr = skb_vnet_common_hdr(skb);
890 memcpy(hdr, hdr_p, hdr_len);
891 if (page_to_free)
892 put_page(page_to_free);
893
894 return skb;
895 }
896
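/* Drop one reference on the per-page DMA mapping set up by virtnet_rq_alloc();
 * the page is unmapped and released once the last buffer carved from it is
 * returned.
 */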
897 static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
898 {
899 struct virtnet_info *vi = rq->vq->vdev->priv;
900 struct page *page = virt_to_head_page(buf);
901 struct virtnet_rq_dma *dma;
902 void *head;
903 int offset;
904
905 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
906
907 head = page_address(page);
908
909 dma = head;
910
911 --dma->ref;
912
913 if (dma->need_sync && len) {
914 offset = buf - (head + sizeof(*dma));
915
916 virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
917 offset, len,
918 DMA_FROM_DEVICE);
919 }
920
921 if (dma->ref)
922 return;
923
924 virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
925 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
926 put_page(page);
927 }
928
929 static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
930 {
931 struct virtnet_info *vi = rq->vq->vdev->priv;
932 void *buf;
933
934 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
935
936 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
937 if (buf)
938 virtnet_rq_unmap(rq, buf, *len);
939
940 return buf;
941 }
942
943 static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
944 {
945 struct virtnet_info *vi = rq->vq->vdev->priv;
946 struct virtnet_rq_dma *dma;
947 dma_addr_t addr;
948 u32 offset;
949 void *head;
950
951 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
952
953 head = page_address(rq->alloc_frag.page);
954
955 offset = buf - head;
956
957 dma = head;
958
959 addr = dma->addr - sizeof(*dma) + offset;
960
961 sg_init_table(rq->sg, 1);
962 sg_fill_dma(rq->sg, addr, len);
963 }
964
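/* Carve a pre-mapped rx buffer out of the page frag. The start of each page
 * holds a struct virtnet_rq_dma describing one DMA mapping that covers the
 * rest of the page; every buffer carved from it takes a page reference and
 * bumps dma->ref, so the mapping is torn down only when the last buffer is
 * returned via virtnet_rq_unmap().
 */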
965 static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
966 {
967 struct page_frag *alloc_frag = &rq->alloc_frag;
968 struct virtnet_info *vi = rq->vq->vdev->priv;
969 struct virtnet_rq_dma *dma;
970 void *buf, *head;
971 dma_addr_t addr;
972
973 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
974
975 head = page_address(alloc_frag->page);
976
977 dma = head;
978
979 /* new pages */
980 if (!alloc_frag->offset) {
981 if (rq->last_dma) {
982 /* Now that the new page is allocated, the last dma
983 * will no longer be used, so it can be unmapped
984 * once its ref drops to 0.
985 */
986 virtnet_rq_unmap(rq, rq->last_dma, 0);
987 rq->last_dma = NULL;
988 }
989
990 dma->len = alloc_frag->size - sizeof(*dma);
991
992 addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
993 dma->len, DMA_FROM_DEVICE, 0);
994 if (virtqueue_dma_mapping_error(rq->vq, addr))
995 return NULL;
996
997 dma->addr = addr;
998 dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
999
1000 /* Add a reference to dma to prevent the entire dma from
1001 * being released during error handling. This reference
1002 * will be freed after the pages are no longer used.
1003 */
1004 get_page(alloc_frag->page);
1005 dma->ref = 1;
1006 alloc_frag->offset = sizeof(*dma);
1007
1008 rq->last_dma = dma;
1009 }
1010
1011 ++dma->ref;
1012
1013 buf = head + alloc_frag->offset;
1014
1015 get_page(alloc_frag->page);
1016 alloc_frag->offset += size;
1017
1018 return buf;
1019 }
1020
1021 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
1022 {
1023 struct virtnet_info *vi = vq->vdev->priv;
1024 struct receive_queue *rq;
1025 int i = vq2rxq(vq);
1026
1027 rq = &vi->rq[i];
1028
1029 if (rq->xsk_pool) {
1030 xsk_buff_free((struct xdp_buff *)buf);
1031 return;
1032 }
1033
1034 if (!vi->big_packets || vi->mergeable_rx_bufs)
1035 virtnet_rq_unmap(rq, buf, 0);
1036
1037 virtnet_rq_free_buf(vi, rq, buf);
1038 }
1039
1040 static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
1041 bool in_napi)
1042 {
1043 struct virtnet_sq_free_stats stats = {0};
1044
1045 virtnet_free_old_xmit(sq, txq, in_napi, &stats);
1046
1047 /* Avoid overhead when no packets have been processed; this
1048 * happens when called speculatively from start_xmit.
1049 */
1050 if (!stats.packets && !stats.napi_packets)
1051 return;
1052
1053 u64_stats_update_begin(&sq->stats.syncp);
1054 u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes);
1055 u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets);
1056 u64_stats_update_end(&sq->stats.syncp);
1057 }
1058
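/* The last vi->xdp_queue_pairs of the currently used queue pairs are reserved
 * for XDP transmit; buffers on them are raw XDP frames rather than skbs.
 */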
1059 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1060 {
1061 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1062 return false;
1063 else if (q < vi->curr_queue_pairs)
1064 return true;
1065 else
1066 return false;
1067 }
1068
1069 static bool tx_may_stop(struct virtnet_info *vi,
1070 struct net_device *dev,
1071 struct send_queue *sq)
1072 {
1073 int qnum;
1074
1075 qnum = sq - vi->sq;
1076
1077 /* If running out of space, stop queue to avoid getting packets that we
1078 * are then unable to transmit.
1079 * An alternative would be to force queuing layer to requeue the skb by
1080 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
1081 * returned in a normal path of operation: it means that driver is not
1082 * maintaining the TX queue stop/start state properly, and causes
1083 * the stack to do a non-trivial amount of useless work.
1084 * Since most packets only take 1 or 2 ring slots, stopping the queue
1085 * early means 16 slots are typically wasted.
1086 */
1087 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1088 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1089
1090 netif_tx_stop_queue(txq);
1091 u64_stats_update_begin(&sq->stats.syncp);
1092 u64_stats_inc(&sq->stats.stop);
1093 u64_stats_update_end(&sq->stats.syncp);
1094
1095 return true;
1096 }
1097
1098 return false;
1099 }
1100
1101 static void check_sq_full_and_disable(struct virtnet_info *vi,
1102 struct net_device *dev,
1103 struct send_queue *sq)
1104 {
1105 bool use_napi = sq->napi.weight;
1106 int qnum;
1107
1108 qnum = sq - vi->sq;
1109
1110 if (tx_may_stop(vi, dev, sq)) {
1111 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1112
1113 if (use_napi) {
1114 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
1115 virtqueue_napi_schedule(&sq->napi, sq->vq);
1116 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1117 /* More just got used, free them then recheck. */
1118 free_old_xmit(sq, txq, false);
1119 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1120 netif_start_subqueue(dev, qnum);
1121 u64_stats_update_begin(&sq->stats.syncp);
1122 u64_stats_inc(&sq->stats.wake);
1123 u64_stats_update_end(&sq->stats.syncp);
1124 virtqueue_disable_cb(sq->vq);
1125 }
1126 }
1127 }
1128 }
1129
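/* For AF_XDP zerocopy rx, the token queued on the virtqueue is the xdp_buff
 * itself; validate the reported length against the pool frame size and sync
 * the buffer for CPU access.
 */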
1130 static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
1131 struct receive_queue *rq, void *buf, u32 len)
1132 {
1133 struct xdp_buff *xdp;
1134 u32 bufsize;
1135
1136 xdp = (struct xdp_buff *)buf;
1137
1138 bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
1139
1140 if (unlikely(len > bufsize)) {
1141 pr_debug("%s: rx error: len %u exceeds truesize %u\n",
1142 vi->dev->name, len, bufsize);
1143 DEV_STATS_INC(vi->dev, rx_length_errors);
1144 xsk_buff_free(xdp);
1145 return NULL;
1146 }
1147
1148 xsk_buff_set_size(xdp, len);
1149 xsk_buff_dma_sync_for_cpu(xdp);
1150
1151 return xdp;
1152 }
1153
1154 static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
1155 struct xdp_buff *xdp)
1156 {
1157 unsigned int metasize = xdp->data - xdp->data_meta;
1158 struct sk_buff *skb;
1159 unsigned int size;
1160
1161 size = xdp->data_end - xdp->data_hard_start;
1162 skb = napi_alloc_skb(&rq->napi, size);
1163 if (unlikely(!skb)) {
1164 xsk_buff_free(xdp);
1165 return NULL;
1166 }
1167
1168 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
1169
1170 size = xdp->data_end - xdp->data_meta;
1171 memcpy(__skb_put(skb, size), xdp->data_meta, size);
1172
1173 if (metasize) {
1174 __skb_pull(skb, metasize);
1175 skb_metadata_set(skb, metasize);
1176 }
1177
1178 xsk_buff_free(xdp);
1179
1180 return skb;
1181 }
1182
1183 static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
1184 struct receive_queue *rq, struct xdp_buff *xdp,
1185 unsigned int *xdp_xmit,
1186 struct virtnet_rq_stats *stats)
1187 {
1188 struct bpf_prog *prog;
1189 u32 ret;
1190
1191 ret = XDP_PASS;
1192 rcu_read_lock();
1193 prog = rcu_dereference(rq->xdp_prog);
1194 if (prog)
1195 ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
1196 rcu_read_unlock();
1197
1198 switch (ret) {
1199 case XDP_PASS:
1200 return xsk_construct_skb(rq, xdp);
1201
1202 case XDP_TX:
1203 case XDP_REDIRECT:
1204 return NULL;
1205
1206 default:
1207 /* drop packet */
1208 xsk_buff_free(xdp);
1209 u64_stats_inc(&stats->drops);
1210 return NULL;
1211 }
1212 }
1213
1214 static void xsk_drop_follow_bufs(struct net_device *dev,
1215 struct receive_queue *rq,
1216 u32 num_buf,
1217 struct virtnet_rq_stats *stats)
1218 {
1219 struct xdp_buff *xdp;
1220 u32 len;
1221
1222 while (num_buf-- > 1) {
1223 xdp = virtqueue_get_buf(rq->vq, &len);
1224 if (unlikely(!xdp)) {
1225 pr_debug("%s: rx error: %d buffers missing\n",
1226 dev->name, num_buf);
1227 DEV_STATS_INC(dev, rx_length_errors);
1228 break;
1229 }
1230 u64_stats_add(&stats->bytes, len);
1231 xsk_buff_free(xdp);
1232 }
1233 }
1234
1235 static int xsk_append_merge_buffer(struct virtnet_info *vi,
1236 struct receive_queue *rq,
1237 struct sk_buff *head_skb,
1238 u32 num_buf,
1239 struct virtio_net_hdr_mrg_rxbuf *hdr,
1240 struct virtnet_rq_stats *stats)
1241 {
1242 struct sk_buff *curr_skb;
1243 struct xdp_buff *xdp;
1244 u32 len, truesize;
1245 struct page *page;
1246 void *buf;
1247
1248 curr_skb = head_skb;
1249
1250 while (--num_buf) {
1251 buf = virtqueue_get_buf(rq->vq, &len);
1252 if (unlikely(!buf)) {
1253 pr_debug("%s: rx error: %d buffers out of %d missing\n",
1254 vi->dev->name, num_buf,
1255 virtio16_to_cpu(vi->vdev,
1256 hdr->num_buffers));
1257 DEV_STATS_INC(vi->dev, rx_length_errors);
1258 return -EINVAL;
1259 }
1260
1261 u64_stats_add(&stats->bytes, len);
1262
1263 xdp = buf_to_xdp(vi, rq, buf, len);
1264 if (!xdp)
1265 goto err;
1266
1267 buf = napi_alloc_frag(len);
1268 if (!buf) {
1269 xsk_buff_free(xdp);
1270 goto err;
1271 }
1272
1273 memcpy(buf, xdp->data - vi->hdr_len, len);
1274
1275 xsk_buff_free(xdp);
1276
1277 page = virt_to_page(buf);
1278
1279 truesize = len;
1280
1281 curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
1282 buf, len, truesize);
1283 if (!curr_skb) {
1284 put_page(page);
1285 goto err;
1286 }
1287 }
1288
1289 return 0;
1290
1291 err:
1292 xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
1293 return -EINVAL;
1294 }
1295
1296 static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
1297 struct receive_queue *rq, struct xdp_buff *xdp,
1298 unsigned int *xdp_xmit,
1299 struct virtnet_rq_stats *stats)
1300 {
1301 struct virtio_net_hdr_mrg_rxbuf *hdr;
1302 struct bpf_prog *prog;
1303 struct sk_buff *skb;
1304 u32 ret, num_buf;
1305
1306 hdr = xdp->data - vi->hdr_len;
1307 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1308
1309 ret = XDP_PASS;
1310 rcu_read_lock();
1311 prog = rcu_dereference(rq->xdp_prog);
1312 /* TODO: support multi buffer. */
1313 if (prog && num_buf == 1)
1314 ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
1315 rcu_read_unlock();
1316
1317 switch (ret) {
1318 case XDP_PASS:
1319 skb = xsk_construct_skb(rq, xdp);
1320 if (!skb)
1321 goto drop_bufs;
1322
1323 if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
1324 dev_kfree_skb(skb);
1325 goto drop;
1326 }
1327
1328 return skb;
1329
1330 case XDP_TX:
1331 case XDP_REDIRECT:
1332 return NULL;
1333
1334 default:
1335 /* drop packet */
1336 xsk_buff_free(xdp);
1337 }
1338
1339 drop_bufs:
1340 xsk_drop_follow_bufs(dev, rq, num_buf, stats);
1341
1342 drop:
1343 u64_stats_inc(&stats->drops);
1344 return NULL;
1345 }
1346
1347 static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
1348 void *buf, u32 len,
1349 unsigned int *xdp_xmit,
1350 struct virtnet_rq_stats *stats)
1351 {
1352 struct net_device *dev = vi->dev;
1353 struct sk_buff *skb = NULL;
1354 struct xdp_buff *xdp;
1355 u8 flags;
1356
1357 len -= vi->hdr_len;
1358
1359 u64_stats_add(&stats->bytes, len);
1360
1361 xdp = buf_to_xdp(vi, rq, buf, len);
1362 if (!xdp)
1363 return;
1364
1365 if (unlikely(len < ETH_HLEN)) {
1366 pr_debug("%s: short packet %i\n", dev->name, len);
1367 DEV_STATS_INC(dev, rx_length_errors);
1368 xsk_buff_free(xdp);
1369 return;
1370 }
1371
1372 flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags;
1373
1374 if (!vi->mergeable_rx_bufs)
1375 skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
1376 else
1377 skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);
1378
1379 if (skb)
1380 virtnet_receive_done(vi, rq, skb, flags);
1381 }
1382
1383 static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
1384 struct xsk_buff_pool *pool, gfp_t gfp)
1385 {
1386 struct xdp_buff **xsk_buffs;
1387 dma_addr_t addr;
1388 int err = 0;
1389 u32 len, i;
1390 int num;
1391
1392 xsk_buffs = rq->xsk_buffs;
1393
1394 num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
1395 if (!num)
1396 return -ENOMEM;
1397
1398 len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
1399
1400 for (i = 0; i < num; ++i) {
1401 /* Use part of XDP_PACKET_HEADROOM as the virtnet hdr space.
1402 * We assume XDP_PACKET_HEADROOM is larger than hdr->len.
1403 * (see function virtnet_xsk_pool_enable)
1404 */
1405 addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
1406
1407 sg_init_table(rq->sg, 1);
1408 sg_fill_dma(rq->sg, addr, len);
1409
1410 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1,
1411 xsk_buffs[i], NULL, gfp);
1412 if (err)
1413 goto err;
1414 }
1415
1416 return num;
1417
1418 err:
1419 for (; i < num; ++i)
1420 xsk_buff_free(xsk_buffs[i]);
1421
1422 return err;
1423 }
1424
1425 static void *virtnet_xsk_to_ptr(u32 len)
1426 {
1427 unsigned long p;
1428
1429 p = len << VIRTIO_XSK_FLAG_OFFSET;
1430
1431 return virtnet_xmit_ptr_pack((void *)p, VIRTNET_XMIT_TYPE_XSK);
1432 }
1433
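/* Queue one XSK tx descriptor as a two-entry scatterlist: a shared all-zero
 * virtio-net header (pre-mapped at sq->xsk_hdr_dma_addr) followed by the
 * frame data from the umem.
 */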
1434 static int virtnet_xsk_xmit_one(struct send_queue *sq,
1435 struct xsk_buff_pool *pool,
1436 struct xdp_desc *desc)
1437 {
1438 struct virtnet_info *vi;
1439 dma_addr_t addr;
1440
1441 vi = sq->vq->vdev->priv;
1442
1443 addr = xsk_buff_raw_get_dma(pool, desc->addr);
1444 xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);
1445
1446 sg_init_table(sq->sg, 2);
1447 sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);
1448 sg_fill_dma(sq->sg + 1, addr, desc->len);
1449
1450 return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2,
1451 virtnet_xsk_to_ptr(desc->len),
1452 GFP_ATOMIC);
1453 }
1454
1455 static int virtnet_xsk_xmit_batch(struct send_queue *sq,
1456 struct xsk_buff_pool *pool,
1457 unsigned int budget,
1458 u64 *kicks)
1459 {
1460 struct xdp_desc *descs = pool->tx_descs;
1461 bool kick = false;
1462 u32 nb_pkts, i;
1463 int err;
1464
1465 budget = min_t(u32, budget, sq->vq->num_free);
1466
1467 nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
1468 if (!nb_pkts)
1469 return 0;
1470
1471 for (i = 0; i < nb_pkts; i++) {
1472 err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
1473 if (unlikely(err)) {
1474 xsk_tx_completed(sq->xsk_pool, nb_pkts - i);
1475 break;
1476 }
1477
1478 kick = true;
1479 }
1480
1481 if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1482 (*kicks)++;
1483
1484 return i;
1485 }
1486
1487 static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
1488 int budget)
1489 {
1490 struct virtnet_info *vi = sq->vq->vdev->priv;
1491 struct virtnet_sq_free_stats stats = {};
1492 struct net_device *dev = vi->dev;
1493 u64 kicks = 0;
1494 int sent;
1495
1496 /* Avoid waking up napi needlessly, so call __free_old_xmit() instead
1497 * of free_old_xmit().
1498 */
1499 __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats);
1500
1501 if (stats.xsk)
1502 xsk_tx_completed(sq->xsk_pool, stats.xsk);
1503
1504 sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
1505
1506 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1507 check_sq_full_and_disable(vi, vi->dev, sq);
1508
1509 if (sent) {
1510 struct netdev_queue *txq;
1511
1512 txq = netdev_get_tx_queue(vi->dev, sq - vi->sq);
1513 txq_trans_cond_update(txq);
1514 }
1515
1516 u64_stats_update_begin(&sq->stats.syncp);
1517 u64_stats_add(&sq->stats.packets, stats.packets);
1518 u64_stats_add(&sq->stats.bytes, stats.bytes);
1519 u64_stats_add(&sq->stats.kicks, kicks);
1520 u64_stats_add(&sq->stats.xdp_tx, sent);
1521 u64_stats_update_end(&sq->stats.syncp);
1522
1523 if (xsk_uses_need_wakeup(pool))
1524 xsk_set_tx_need_wakeup(pool);
1525
1526 return sent;
1527 }
1528
1529 static void xsk_wakeup(struct send_queue *sq)
1530 {
1531 if (napi_if_scheduled_mark_missed(&sq->napi))
1532 return;
1533
1534 local_bh_disable();
1535 virtqueue_napi_schedule(&sq->napi, sq->vq);
1536 local_bh_enable();
1537 }
1538
1539 static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
1540 {
1541 struct virtnet_info *vi = netdev_priv(dev);
1542 struct send_queue *sq;
1543
1544 if (!netif_running(dev))
1545 return -ENETDOWN;
1546
1547 if (qid >= vi->curr_queue_pairs)
1548 return -EINVAL;
1549
1550 sq = &vi->sq[qid];
1551
1552 xsk_wakeup(sq);
1553 return 0;
1554 }
1555
1556 static void virtnet_xsk_completed(struct send_queue *sq, int num)
1557 {
1558 xsk_tx_completed(sq->xsk_pool, num);
1559
1560 /* Whether this is called from rx poll, start_xmit or xdp xmit, we should
1561 * wake up the tx napi to consume the xsk tx queue, because the tx
1562 * interrupt may not be triggered.
1563 */
1564 xsk_wakeup(sq);
1565 }
1566
1567 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
1568 struct send_queue *sq,
1569 struct xdp_frame *xdpf)
1570 {
1571 struct virtio_net_hdr_mrg_rxbuf *hdr;
1572 struct skb_shared_info *shinfo;
1573 u8 nr_frags = 0;
1574 int err, i;
1575
1576 if (unlikely(xdpf->headroom < vi->hdr_len))
1577 return -EOVERFLOW;
1578
1579 if (unlikely(xdp_frame_has_frags(xdpf))) {
1580 shinfo = xdp_get_shared_info_from_frame(xdpf);
1581 nr_frags = shinfo->nr_frags;
1582 }
1583
1584 /* In wrapping function virtnet_xdp_xmit(), we need to free
1585 * up the pending old buffers, where we need to calculate the
1586 * position of skb_shared_info in xdp_get_frame_len() and
1587 * xdp_return_frame(), which involves xdpf->data and
1588 * xdpf->headroom. Therefore, we need to update the value of
1589 * headroom synchronously here.
1590 */
1591 xdpf->headroom -= vi->hdr_len;
1592 xdpf->data -= vi->hdr_len;
1593 /* Zero header and leave csum up to XDP layers */
1594 hdr = xdpf->data;
1595 memset(hdr, 0, vi->hdr_len);
1596 xdpf->len += vi->hdr_len;
1597
1598 sg_init_table(sq->sg, nr_frags + 1);
1599 sg_set_buf(sq->sg, xdpf->data, xdpf->len);
1600 for (i = 0; i < nr_frags; i++) {
1601 skb_frag_t *frag = &shinfo->frags[i];
1602
1603 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
1604 skb_frag_size(frag), skb_frag_off(frag));
1605 }
1606
1607 err = virtnet_add_outbuf(sq, nr_frags + 1, xdpf, VIRTNET_XMIT_TYPE_XDP);
1608 if (unlikely(err))
1609 return -ENOSPC; /* Caller handle free/refcnt */
1610
1611 return 0;
1612 }
1613
1614 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1615 * the current cpu, so it does not need to be locked.
1616 *
1617 * Here we use a macro instead of inline functions because we have to deal with
1618 * three issues at the same time: 1. the choice of sq; 2. deciding on and doing the
1619 * lock/unlock of the txq; 3. keeping sparse happy. It is difficult for two inline
1620 * functions to solve these three problems perfectly at the same time.
1621 */
1622 #define virtnet_xdp_get_sq(vi) ({ \
1623 int cpu = smp_processor_id(); \
1624 struct netdev_queue *txq; \
1625 typeof(vi) v = (vi); \
1626 unsigned int qp; \
1627 \
1628 if (v->curr_queue_pairs > nr_cpu_ids) { \
1629 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
1630 qp += cpu; \
1631 txq = netdev_get_tx_queue(v->dev, qp); \
1632 __netif_tx_acquire(txq); \
1633 } else { \
1634 qp = cpu % v->curr_queue_pairs; \
1635 txq = netdev_get_tx_queue(v->dev, qp); \
1636 __netif_tx_lock(txq, cpu); \
1637 } \
1638 v->sq + qp; \
1639 })
1640
1641 #define virtnet_xdp_put_sq(vi, q) { \
1642 struct netdev_queue *txq; \
1643 typeof(vi) v = (vi); \
1644 \
1645 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
1646 if (v->curr_queue_pairs > nr_cpu_ids) \
1647 __netif_tx_release(txq); \
1648 else \
1649 __netif_tx_unlock(txq); \
1650 }
1651
1652 static int virtnet_xdp_xmit(struct net_device *dev,
1653 int n, struct xdp_frame **frames, u32 flags)
1654 {
1655 struct virtnet_info *vi = netdev_priv(dev);
1656 struct virtnet_sq_free_stats stats = {0};
1657 struct receive_queue *rq = vi->rq;
1658 struct bpf_prog *xdp_prog;
1659 struct send_queue *sq;
1660 int nxmit = 0;
1661 int kicks = 0;
1662 int ret;
1663 int i;
1664
1665 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
1666 * indicates XDP resources have been successfully allocated.
1667 */
1668 xdp_prog = rcu_access_pointer(rq->xdp_prog);
1669 if (!xdp_prog)
1670 return -ENXIO;
1671
1672 sq = virtnet_xdp_get_sq(vi);
1673
1674 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
1675 ret = -EINVAL;
1676 goto out;
1677 }
1678
1679 /* Free up any pending old buffers before queueing new ones. */
1680 virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
1681 false, &stats);
1682
1683 for (i = 0; i < n; i++) {
1684 struct xdp_frame *xdpf = frames[i];
1685
1686 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
1687 break;
1688 nxmit++;
1689 }
1690 ret = nxmit;
1691
1692 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1693 check_sq_full_and_disable(vi, dev, sq);
1694
1695 if (flags & XDP_XMIT_FLUSH) {
1696 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1697 kicks = 1;
1698 }
1699 out:
1700 u64_stats_update_begin(&sq->stats.syncp);
1701 u64_stats_add(&sq->stats.bytes, stats.bytes);
1702 u64_stats_add(&sq->stats.packets, stats.packets);
1703 u64_stats_add(&sq->stats.xdp_tx, n);
1704 u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
1705 u64_stats_add(&sq->stats.kicks, kicks);
1706 u64_stats_update_end(&sq->stats.syncp);
1707
1708 virtnet_xdp_put_sq(vi, sq);
1709 return ret;
1710 }
1711
1712 static void put_xdp_frags(struct xdp_buff *xdp)
1713 {
1714 struct skb_shared_info *shinfo;
1715 struct page *xdp_page;
1716 int i;
1717
1718 if (xdp_buff_has_frags(xdp)) {
1719 shinfo = xdp_get_shared_info_from_buff(xdp);
1720 for (i = 0; i < shinfo->nr_frags; i++) {
1721 xdp_page = skb_frag_page(&shinfo->frags[i]);
1722 put_page(xdp_page);
1723 }
1724 }
1725 }
1726
1727 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
1728 struct net_device *dev,
1729 unsigned int *xdp_xmit,
1730 struct virtnet_rq_stats *stats)
1731 {
1732 struct xdp_frame *xdpf;
1733 int err;
1734 u32 act;
1735
1736 act = bpf_prog_run_xdp(xdp_prog, xdp);
1737 u64_stats_inc(&stats->xdp_packets);
1738
1739 switch (act) {
1740 case XDP_PASS:
1741 return act;
1742
1743 case XDP_TX:
1744 u64_stats_inc(&stats->xdp_tx);
1745 xdpf = xdp_convert_buff_to_frame(xdp);
1746 if (unlikely(!xdpf)) {
1747 netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1748 return XDP_DROP;
1749 }
1750
1751 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1752 if (unlikely(!err)) {
1753 xdp_return_frame_rx_napi(xdpf);
1754 } else if (unlikely(err < 0)) {
1755 trace_xdp_exception(dev, xdp_prog, act);
1756 return XDP_DROP;
1757 }
1758 *xdp_xmit |= VIRTIO_XDP_TX;
1759 return act;
1760
1761 case XDP_REDIRECT:
1762 u64_stats_inc(&stats->xdp_redirects);
1763 err = xdp_do_redirect(dev, xdp, xdp_prog);
1764 if (err)
1765 return XDP_DROP;
1766
1767 *xdp_xmit |= VIRTIO_XDP_REDIR;
1768 return act;
1769
1770 default:
1771 bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
1772 fallthrough;
1773 case XDP_ABORTED:
1774 trace_xdp_exception(dev, xdp_prog, act);
1775 fallthrough;
1776 case XDP_DROP:
1777 return XDP_DROP;
1778 }
1779 }
1780
1781 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1782 {
1783 return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
1784 }
1785
1786 /* We copy the packet for XDP in the following cases:
1787 *
1788 * 1) Packet is scattered across multiple rx buffers.
1789 * 2) Headroom space is insufficient.
1790 *
1791 * This is inefficient but it's a temporary condition that
1792 * we hit right after XDP is enabled and until the queue is refilled
1793 * with large buffers with sufficient headroom - so it should affect
1794 * at most a ring's worth of packets.
1795 * Afterwards, the conditions to enable
1796 * XDP should preclude the underlying device from sending packets
1797 * across multiple buffers (num_buf > 1), and we make sure buffers
1798 * have enough headroom.
1799 */
1800 static struct page *xdp_linearize_page(struct receive_queue *rq,
1801 int *num_buf,
1802 struct page *p,
1803 int offset,
1804 int page_off,
1805 unsigned int *len)
1806 {
1807 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1808 struct page *page;
1809
1810 if (page_off + *len + tailroom > PAGE_SIZE)
1811 return NULL;
1812
1813 page = alloc_page(GFP_ATOMIC);
1814 if (!page)
1815 return NULL;
1816
1817 memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
1818 page_off += *len;
1819
1820 while (--*num_buf) {
1821 unsigned int buflen;
1822 void *buf;
1823 int off;
1824
1825 buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1826 if (unlikely(!buf))
1827 goto err_buf;
1828
1829 p = virt_to_head_page(buf);
1830 off = buf - page_address(p);
1831
1832 /* guard against a misconfigured or uncooperative backend that
1833 * is sending packets larger than the MTU.
1834 */
1835 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
1836 put_page(p);
1837 goto err_buf;
1838 }
1839
1840 memcpy(page_address(page) + page_off,
1841 page_address(p) + off, buflen);
1842 page_off += buflen;
1843 put_page(p);
1844 }
1845
1846 /* Headroom does not contribute to packet length */
1847 *len = page_off - XDP_PACKET_HEADROOM;
1848 return page;
1849 err_buf:
1850 __free_pages(page, 0);
1851 return NULL;
1852 }
1853
1854 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1855 unsigned int xdp_headroom,
1856 void *buf,
1857 unsigned int len)
1858 {
1859 unsigned int header_offset;
1860 unsigned int headroom;
1861 unsigned int buflen;
1862 struct sk_buff *skb;
1863
1864 header_offset = VIRTNET_RX_PAD + xdp_headroom;
1865 headroom = vi->hdr_len + header_offset;
1866 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1867 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1868
1869 skb = virtnet_build_skb(buf, buflen, headroom, len);
1870 if (unlikely(!skb))
1871 return NULL;
1872
1873 buf += header_offset;
1874 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1875
1876 return skb;
1877 }
1878
1879 static struct sk_buff *receive_small_xdp(struct net_device *dev,
1880 struct virtnet_info *vi,
1881 struct receive_queue *rq,
1882 struct bpf_prog *xdp_prog,
1883 void *buf,
1884 unsigned int xdp_headroom,
1885 unsigned int len,
1886 unsigned int *xdp_xmit,
1887 struct virtnet_rq_stats *stats)
1888 {
1889 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
1890 unsigned int headroom = vi->hdr_len + header_offset;
1891 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1892 struct page *page = virt_to_head_page(buf);
1893 struct page *xdp_page;
1894 unsigned int buflen;
1895 struct xdp_buff xdp;
1896 struct sk_buff *skb;
1897 unsigned int metasize = 0;
1898 u32 act;
1899
1900 if (unlikely(hdr->hdr.gso_type))
1901 goto err_xdp;
1902
1903 /* Partially checksummed packets must be dropped. */
1904 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
1905 goto err_xdp;
1906
1907 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1908 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1909
1910 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
1911 int offset = buf - page_address(page) + header_offset;
1912 unsigned int tlen = len + vi->hdr_len;
1913 int num_buf = 1;
1914
1915 xdp_headroom = virtnet_get_headroom(vi);
1916 header_offset = VIRTNET_RX_PAD + xdp_headroom;
1917 headroom = vi->hdr_len + header_offset;
1918 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1919 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1920 xdp_page = xdp_linearize_page(rq, &num_buf, page,
1921 offset, header_offset,
1922 &tlen);
1923 if (!xdp_page)
1924 goto err_xdp;
1925
1926 buf = page_address(xdp_page);
1927 put_page(page);
1928 page = xdp_page;
1929 }
1930
1931 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1932 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1933 xdp_headroom, len, true);
1934
1935 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1936
1937 switch (act) {
1938 case XDP_PASS:
1939 /* Recalculate length in case bpf program changed it */
1940 len = xdp.data_end - xdp.data;
1941 metasize = xdp.data - xdp.data_meta;
1942 break;
1943
1944 case XDP_TX:
1945 case XDP_REDIRECT:
1946 goto xdp_xmit;
1947
1948 default:
1949 goto err_xdp;
1950 }
1951
1952 skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
1953 if (unlikely(!skb))
1954 goto err;
1955
1956 if (metasize)
1957 skb_metadata_set(skb, metasize);
1958
1959 return skb;
1960
1961 err_xdp:
1962 u64_stats_inc(&stats->xdp_drops);
1963 err:
1964 u64_stats_inc(&stats->drops);
1965 put_page(page);
1966 xdp_xmit:
1967 return NULL;
1968 }
1969
1970 static struct sk_buff *receive_small(struct net_device *dev,
1971 struct virtnet_info *vi,
1972 struct receive_queue *rq,
1973 void *buf, void *ctx,
1974 unsigned int len,
1975 unsigned int *xdp_xmit,
1976 struct virtnet_rq_stats *stats)
1977 {
1978 unsigned int xdp_headroom = (unsigned long)ctx;
1979 struct page *page = virt_to_head_page(buf);
1980 struct sk_buff *skb;
1981
1982 	/* We passed the address of the virtnet header to virtio-core,
1983 * so truncate the padding.
1984 */
1985 buf -= VIRTNET_RX_PAD + xdp_headroom;
1986
1987 len -= vi->hdr_len;
1988 u64_stats_add(&stats->bytes, len);
1989
1990 if (unlikely(len > GOOD_PACKET_LEN)) {
1991 pr_debug("%s: rx error: len %u exceeds max size %d\n",
1992 dev->name, len, GOOD_PACKET_LEN);
1993 DEV_STATS_INC(dev, rx_length_errors);
1994 goto err;
1995 }
1996
1997 if (unlikely(vi->xdp_enabled)) {
1998 struct bpf_prog *xdp_prog;
1999
2000 rcu_read_lock();
2001 xdp_prog = rcu_dereference(rq->xdp_prog);
2002 if (xdp_prog) {
2003 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
2004 xdp_headroom, len, xdp_xmit,
2005 stats);
2006 rcu_read_unlock();
2007 return skb;
2008 }
2009 rcu_read_unlock();
2010 }
2011
2012 skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
2013 if (likely(skb))
2014 return skb;
2015
2016 err:
2017 u64_stats_inc(&stats->drops);
2018 put_page(page);
2019 return NULL;
2020 }
2021
2022 static struct sk_buff *receive_big(struct net_device *dev,
2023 struct virtnet_info *vi,
2024 struct receive_queue *rq,
2025 void *buf,
2026 unsigned int len,
2027 struct virtnet_rq_stats *stats)
2028 {
2029 struct page *page = buf;
2030 struct sk_buff *skb =
2031 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
2032
2033 u64_stats_add(&stats->bytes, len - vi->hdr_len);
2034 if (unlikely(!skb))
2035 goto err;
2036
2037 return skb;
2038
2039 err:
2040 u64_stats_inc(&stats->drops);
2041 give_pages(rq, page);
2042 return NULL;
2043 }
2044
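/* When a mergeable-buffer frame is dropped part way through, the remaining
 * buffers of the chain are still sitting in the virtqueue. Pop and release
 * them here so that descriptors are not leaked and byte counters stay
 * consistent.
 */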
2045 static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
2046 struct net_device *dev,
2047 struct virtnet_rq_stats *stats)
2048 {
2049 struct page *page;
2050 void *buf;
2051 int len;
2052
2053 while (num_buf-- > 1) {
2054 buf = virtnet_rq_get_buf(rq, &len, NULL);
2055 if (unlikely(!buf)) {
2056 pr_debug("%s: rx error: %d buffers missing\n",
2057 dev->name, num_buf);
2058 DEV_STATS_INC(dev, rx_length_errors);
2059 break;
2060 }
2061 u64_stats_add(&stats->bytes, len);
2062 page = virt_to_head_page(buf);
2063 put_page(page);
2064 }
2065 }
2066
2067 /* Why not use xdp_build_skb_from_frame()?
2068  * The XDP core assumes that xdp frags are PAGE_SIZE in length, while in
2069  * virtio-net there are 2 points that do not match its requirements:
2070  *  1. The size of the prefilled buffer is not fixed before xdp is set.
2071  *  2. xdp_build_skb_from_frame() does extra checks that we don't need,
2072  *  like eth_type_trans() (which virtio-net does in receive_buf()).
2073 */
2074 static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
2075 struct virtnet_info *vi,
2076 struct xdp_buff *xdp,
2077 unsigned int xdp_frags_truesz)
2078 {
2079 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2080 unsigned int headroom, data_len;
2081 struct sk_buff *skb;
2082 int metasize;
2083 u8 nr_frags;
2084
2085 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
2086 pr_debug("Error building skb as missing reserved tailroom for xdp");
2087 return NULL;
2088 }
2089
2090 if (unlikely(xdp_buff_has_frags(xdp)))
2091 nr_frags = sinfo->nr_frags;
2092
2093 skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
2094 if (unlikely(!skb))
2095 return NULL;
2096
2097 headroom = xdp->data - xdp->data_hard_start;
2098 data_len = xdp->data_end - xdp->data;
2099 skb_reserve(skb, headroom);
2100 __skb_put(skb, data_len);
2101
2102 metasize = xdp->data - xdp->data_meta;
2103 metasize = metasize > 0 ? metasize : 0;
2104 if (metasize)
2105 skb_metadata_set(skb, metasize);
2106
2107 if (unlikely(xdp_buff_has_frags(xdp)))
2108 xdp_update_skb_shared_info(skb, nr_frags,
2109 sinfo->xdp_frags_size,
2110 xdp_frags_truesz,
2111 xdp_buff_is_frag_pfmemalloc(xdp));
2112
2113 return skb;
2114 }
2115
2116 /* TODO: build xdp in big mode */
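/* Assemble a multi-buffer xdp_buff from a mergeable-buffer chain: the first
 * buffer becomes the linear area (behind XDP_PACKET_HEADROOM), and up to
 * MAX_SKB_FRAGS following buffers are attached as frags in the shared info.
 */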
2117 static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
2118 struct virtnet_info *vi,
2119 struct receive_queue *rq,
2120 struct xdp_buff *xdp,
2121 void *buf,
2122 unsigned int len,
2123 unsigned int frame_sz,
2124 int *num_buf,
2125 unsigned int *xdp_frags_truesize,
2126 struct virtnet_rq_stats *stats)
2127 {
2128 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2129 unsigned int headroom, tailroom, room;
2130 unsigned int truesize, cur_frag_size;
2131 struct skb_shared_info *shinfo;
2132 unsigned int xdp_frags_truesz = 0;
2133 struct page *page;
2134 skb_frag_t *frag;
2135 int offset;
2136 void *ctx;
2137
2138 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
2139 xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM,
2140 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
2141
2142 if (!*num_buf)
2143 return 0;
2144
2145 if (*num_buf > 1) {
2146 		/* To build a multi-buffer xdp_buff, the
2147 		 * XDP_FLAGS_HAS_FRAGS bit must be set in
2148 		 * the xdp_buff flags.
2149 */
2150 if (!xdp_buff_has_frags(xdp))
2151 xdp_buff_set_frags_flag(xdp);
2152
2153 shinfo = xdp_get_shared_info_from_buff(xdp);
2154 shinfo->nr_frags = 0;
2155 shinfo->xdp_frags_size = 0;
2156 }
2157
2158 if (*num_buf > MAX_SKB_FRAGS + 1)
2159 return -EINVAL;
2160
2161 while (--*num_buf > 0) {
2162 buf = virtnet_rq_get_buf(rq, &len, &ctx);
2163 if (unlikely(!buf)) {
2164 pr_debug("%s: rx error: %d buffers out of %d missing\n",
2165 dev->name, *num_buf,
2166 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
2167 DEV_STATS_INC(dev, rx_length_errors);
2168 goto err;
2169 }
2170
2171 u64_stats_add(&stats->bytes, len);
2172 page = virt_to_head_page(buf);
2173 offset = buf - page_address(page);
2174
2175 truesize = mergeable_ctx_to_truesize(ctx);
2176 headroom = mergeable_ctx_to_headroom(ctx);
2177 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2178 room = SKB_DATA_ALIGN(headroom + tailroom);
2179
2180 cur_frag_size = truesize;
2181 xdp_frags_truesz += cur_frag_size;
2182 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
2183 put_page(page);
2184 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
2185 dev->name, len, (unsigned long)(truesize - room));
2186 DEV_STATS_INC(dev, rx_length_errors);
2187 goto err;
2188 }
2189
2190 frag = &shinfo->frags[shinfo->nr_frags++];
2191 skb_frag_fill_page_desc(frag, page, offset, len);
2192 if (page_is_pfmemalloc(page))
2193 xdp_buff_set_frag_pfmemalloc(xdp);
2194
2195 shinfo->xdp_frags_size += len;
2196 }
2197
2198 *xdp_frags_truesize = xdp_frags_truesz;
2199 return 0;
2200
2201 err:
2202 put_xdp_frags(xdp);
2203 return -EINVAL;
2204 }
2205
2206 static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
2207 struct receive_queue *rq,
2208 struct bpf_prog *xdp_prog,
2209 void *ctx,
2210 unsigned int *frame_sz,
2211 int *num_buf,
2212 struct page **page,
2213 int offset,
2214 unsigned int *len,
2215 struct virtio_net_hdr_mrg_rxbuf *hdr)
2216 {
2217 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
2218 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
2219 struct page *xdp_page;
2220 unsigned int xdp_room;
2221
2222 /* Transient failure which in theory could occur if
2223 * in-flight packets from before XDP was enabled reach
2224 * the receive path after XDP is loaded.
2225 */
2226 if (unlikely(hdr->hdr.gso_type))
2227 return NULL;
2228
2229 /* Partially checksummed packets must be dropped. */
2230 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
2231 return NULL;
2232
2233 	/* The XDP core currently assumes the frag size is PAGE_SIZE, but buffers
2234 	 * with headroom may add a hole in truesize, which
2235 	 * makes their length exceed PAGE_SIZE. So we disable the
2236 * hole mechanism for xdp. See add_recvbuf_mergeable().
2237 */
2238 *frame_sz = truesize;
2239
2240 if (likely(headroom >= virtnet_get_headroom(vi) &&
2241 (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
2242 return page_address(*page) + offset;
2243 }
2244
2245 	/* This happens when there is not enough headroom because
2246 	 * the buffer was prefilled before XDP was set.
2247 * This should only happen for the first several packets.
2248 * In fact, vq reset can be used here to help us clean up
2249 * the prefilled buffers, but many existing devices do not
2250 * support it, and we don't want to bother users who are
2251 * using xdp normally.
2252 */
2253 if (!xdp_prog->aux->xdp_has_frags) {
2254 /* linearize data for XDP */
2255 xdp_page = xdp_linearize_page(rq, num_buf,
2256 *page, offset,
2257 XDP_PACKET_HEADROOM,
2258 len);
2259 if (!xdp_page)
2260 return NULL;
2261 } else {
2262 xdp_room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
2263 sizeof(struct skb_shared_info));
2264 if (*len + xdp_room > PAGE_SIZE)
2265 return NULL;
2266
2267 xdp_page = alloc_page(GFP_ATOMIC);
2268 if (!xdp_page)
2269 return NULL;
2270
2271 memcpy(page_address(xdp_page) + XDP_PACKET_HEADROOM,
2272 page_address(*page) + offset, *len);
2273 }
2274
2275 *frame_sz = PAGE_SIZE;
2276
2277 put_page(*page);
2278
2279 *page = xdp_page;
2280
2281 return page_address(*page) + XDP_PACKET_HEADROOM;
2282 }
2283
2284 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
2285 struct virtnet_info *vi,
2286 struct receive_queue *rq,
2287 struct bpf_prog *xdp_prog,
2288 void *buf,
2289 void *ctx,
2290 unsigned int len,
2291 unsigned int *xdp_xmit,
2292 struct virtnet_rq_stats *stats)
2293 {
2294 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2295 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
2296 struct page *page = virt_to_head_page(buf);
2297 int offset = buf - page_address(page);
2298 unsigned int xdp_frags_truesz = 0;
2299 struct sk_buff *head_skb;
2300 unsigned int frame_sz;
2301 struct xdp_buff xdp;
2302 void *data;
2303 u32 act;
2304 int err;
2305
2306 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
2307 offset, &len, hdr);
2308 if (unlikely(!data))
2309 goto err_xdp;
2310
2311 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
2312 &num_buf, &xdp_frags_truesz, stats);
2313 if (unlikely(err))
2314 goto err_xdp;
2315
2316 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
2317
2318 switch (act) {
2319 case XDP_PASS:
2320 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
2321 if (unlikely(!head_skb))
2322 break;
2323 return head_skb;
2324
2325 case XDP_TX:
2326 case XDP_REDIRECT:
2327 return NULL;
2328
2329 default:
2330 break;
2331 }
2332
2333 put_xdp_frags(&xdp);
2334
2335 err_xdp:
2336 put_page(page);
2337 mergeable_buf_free(rq, num_buf, dev, stats);
2338
2339 u64_stats_inc(&stats->xdp_drops);
2340 u64_stats_inc(&stats->drops);
2341 return NULL;
2342 }
2343
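/* Attach one more receive buffer to the skb being assembled: coalesce with
 * the previous fragment when the buffers are contiguous within the same
 * page, otherwise add a new rx frag. Once MAX_SKB_FRAGS is reached, a fresh
 * zero-length skb is chained via frag_list and filling continues there.
 */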
2344 static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
2345 struct sk_buff *curr_skb,
2346 struct page *page, void *buf,
2347 int len, int truesize)
2348 {
2349 int num_skb_frags;
2350 int offset;
2351
2352 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
2353 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
2354 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
2355
2356 if (unlikely(!nskb))
2357 return NULL;
2358
2359 if (curr_skb == head_skb)
2360 skb_shinfo(curr_skb)->frag_list = nskb;
2361 else
2362 curr_skb->next = nskb;
2363 curr_skb = nskb;
2364 head_skb->truesize += nskb->truesize;
2365 num_skb_frags = 0;
2366 }
2367
2368 if (curr_skb != head_skb) {
2369 head_skb->data_len += len;
2370 head_skb->len += len;
2371 head_skb->truesize += truesize;
2372 }
2373
2374 offset = buf - page_address(page);
2375 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
2376 put_page(page);
2377 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
2378 len, truesize);
2379 } else {
2380 skb_add_rx_frag(curr_skb, num_skb_frags, page,
2381 offset, len, truesize);
2382 }
2383
2384 return curr_skb;
2385 }
2386
2387 static struct sk_buff *receive_mergeable(struct net_device *dev,
2388 struct virtnet_info *vi,
2389 struct receive_queue *rq,
2390 void *buf,
2391 void *ctx,
2392 unsigned int len,
2393 unsigned int *xdp_xmit,
2394 struct virtnet_rq_stats *stats)
2395 {
2396 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2397 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
2398 struct page *page = virt_to_head_page(buf);
2399 int offset = buf - page_address(page);
2400 struct sk_buff *head_skb, *curr_skb;
2401 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
2402 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
2403 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2404 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
2405
2406 head_skb = NULL;
2407 u64_stats_add(&stats->bytes, len - vi->hdr_len);
2408
2409 if (unlikely(len > truesize - room)) {
2410 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
2411 dev->name, len, (unsigned long)(truesize - room));
2412 DEV_STATS_INC(dev, rx_length_errors);
2413 goto err_skb;
2414 }
2415
2416 if (unlikely(vi->xdp_enabled)) {
2417 struct bpf_prog *xdp_prog;
2418
2419 rcu_read_lock();
2420 xdp_prog = rcu_dereference(rq->xdp_prog);
2421 if (xdp_prog) {
2422 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
2423 len, xdp_xmit, stats);
2424 rcu_read_unlock();
2425 return head_skb;
2426 }
2427 rcu_read_unlock();
2428 }
2429
2430 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
2431 curr_skb = head_skb;
2432
2433 if (unlikely(!curr_skb))
2434 goto err_skb;
2435 while (--num_buf) {
2436 buf = virtnet_rq_get_buf(rq, &len, &ctx);
2437 if (unlikely(!buf)) {
2438 pr_debug("%s: rx error: %d buffers out of %d missing\n",
2439 dev->name, num_buf,
2440 virtio16_to_cpu(vi->vdev,
2441 hdr->num_buffers));
2442 DEV_STATS_INC(dev, rx_length_errors);
2443 goto err_buf;
2444 }
2445
2446 u64_stats_add(&stats->bytes, len);
2447 page = virt_to_head_page(buf);
2448
2449 truesize = mergeable_ctx_to_truesize(ctx);
2450 headroom = mergeable_ctx_to_headroom(ctx);
2451 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2452 room = SKB_DATA_ALIGN(headroom + tailroom);
2453 if (unlikely(len > truesize - room)) {
2454 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
2455 dev->name, len, (unsigned long)(truesize - room));
2456 DEV_STATS_INC(dev, rx_length_errors);
2457 goto err_skb;
2458 }
2459
2460 curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
2461 buf, len, truesize);
2462 if (!curr_skb)
2463 goto err_skb;
2464 }
2465
2466 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
2467 return head_skb;
2468
2469 err_skb:
2470 put_page(page);
2471 mergeable_buf_free(rq, num_buf, dev, stats);
2472
2473 err_buf:
2474 u64_stats_inc(&stats->drops);
2475 dev_kfree_skb(head_skb);
2476 return NULL;
2477 }
2478
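/* Translate the hash type reported by the device into the kernel's packet
 * hash level: L4 for the TCP/UDP report types, L3 for IP-only reports and
 * none otherwise, then record the 32-bit hash value in the skb.
 */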
2479 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
2480 struct sk_buff *skb)
2481 {
2482 enum pkt_hash_types rss_hash_type;
2483
2484 if (!hdr_hash || !skb)
2485 return;
2486
2487 switch (__le16_to_cpu(hdr_hash->hash_report)) {
2488 case VIRTIO_NET_HASH_REPORT_TCPv4:
2489 case VIRTIO_NET_HASH_REPORT_UDPv4:
2490 case VIRTIO_NET_HASH_REPORT_TCPv6:
2491 case VIRTIO_NET_HASH_REPORT_UDPv6:
2492 case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
2493 case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
2494 rss_hash_type = PKT_HASH_TYPE_L4;
2495 break;
2496 case VIRTIO_NET_HASH_REPORT_IPv4:
2497 case VIRTIO_NET_HASH_REPORT_IPv6:
2498 case VIRTIO_NET_HASH_REPORT_IPv6_EX:
2499 rss_hash_type = PKT_HASH_TYPE_L3;
2500 break;
2501 case VIRTIO_NET_HASH_REPORT_NONE:
2502 default:
2503 rss_hash_type = PKT_HASH_TYPE_NONE;
2504 }
2505 skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
2506 }
2507
2508 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
2509 struct sk_buff *skb, u8 flags)
2510 {
2511 struct virtio_net_common_hdr *hdr;
2512 struct net_device *dev = vi->dev;
2513
2514 hdr = skb_vnet_common_hdr(skb);
2515 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
2516 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
2517
2518 if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
2519 skb->ip_summed = CHECKSUM_UNNECESSARY;
2520
2521 if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
2522 virtio_is_little_endian(vi->vdev))) {
2523 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
2524 dev->name, hdr->hdr.gso_type,
2525 hdr->hdr.gso_size);
2526 goto frame_err;
2527 }
2528
2529 skb_record_rx_queue(skb, vq2rxq(rq->vq));
2530 skb->protocol = eth_type_trans(skb, dev);
2531 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
2532 ntohs(skb->protocol), skb->len, skb->pkt_type);
2533
2534 napi_gro_receive(&rq->napi, skb);
2535 return;
2536
2537 frame_err:
2538 DEV_STATS_INC(dev, rx_frame_errors);
2539 dev_kfree_skb(skb);
2540 }
2541
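/* Entry point for one completed receive buffer: validate the minimum length,
 * save hdr.flags before an XDP program can overwrite the header, then
 * dispatch to the mergeable, big or small receive path depending on the
 * negotiated features.
 */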
2542 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
2543 void *buf, unsigned int len, void **ctx,
2544 unsigned int *xdp_xmit,
2545 struct virtnet_rq_stats *stats)
2546 {
2547 struct net_device *dev = vi->dev;
2548 struct sk_buff *skb;
2549 u8 flags;
2550
2551 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
2552 pr_debug("%s: short packet %i\n", dev->name, len);
2553 DEV_STATS_INC(dev, rx_length_errors);
2554 virtnet_rq_free_buf(vi, rq, buf);
2555 return;
2556 }
2557
2558 /* 1. Save the flags early, as the XDP program might overwrite them.
2559 * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
2560 * stay valid after XDP processing.
2561 * 2. XDP doesn't work with partially checksummed packets (refer to
2562 * virtnet_xdp_set()), so packets marked as
2563 * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
2564 */
2565 flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
2566
2567 if (vi->mergeable_rx_bufs)
2568 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
2569 stats);
2570 else if (vi->big_packets)
2571 skb = receive_big(dev, vi, rq, buf, len, stats);
2572 else
2573 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
2574
2575 if (unlikely(!skb))
2576 return;
2577
2578 virtnet_receive_done(vi, rq, skb, flags);
2579 }
2580
2581 /* Unlike mergeable buffers, all buffers are allocated to the
2582 * same size, except for the headroom. For this reason we do
2583 * not need to use mergeable_len_to_ctx here - it is enough
2584 * to store the headroom as the context ignoring the truesize.
2585 */
2586 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
2587 gfp_t gfp)
2588 {
2589 char *buf;
2590 unsigned int xdp_headroom = virtnet_get_headroom(vi);
2591 void *ctx = (void *)(unsigned long)xdp_headroom;
2592 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
2593 int err;
2594
2595 len = SKB_DATA_ALIGN(len) +
2596 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2597
2598 if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp)))
2599 return -ENOMEM;
2600
2601 buf = virtnet_rq_alloc(rq, len, gfp);
2602 if (unlikely(!buf))
2603 return -ENOMEM;
2604
2605 buf += VIRTNET_RX_PAD + xdp_headroom;
2606
2607 virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
2608
2609 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
2610 if (err < 0) {
2611 virtnet_rq_unmap(rq, buf, 0);
2612 put_page(virt_to_head_page(buf));
2613 }
2614
2615 return err;
2616 }
2617
2618 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
2619 gfp_t gfp)
2620 {
2621 struct page *first, *list = NULL;
2622 char *p;
2623 int i, err, offset;
2624
2625 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
2626
2627 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
2628 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
2629 first = get_a_page(rq, gfp);
2630 if (!first) {
2631 if (list)
2632 give_pages(rq, list);
2633 return -ENOMEM;
2634 }
2635 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
2636
2637 /* chain new page in list head to match sg */
2638 first->private = (unsigned long)list;
2639 list = first;
2640 }
2641
2642 first = get_a_page(rq, gfp);
2643 if (!first) {
2644 give_pages(rq, list);
2645 return -ENOMEM;
2646 }
2647 p = page_address(first);
2648
2649 /* rq->sg[0], rq->sg[1] share the same page */
2650 	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
2651 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
2652
2653 	/* rq->sg[1] for the data packet, starting at offset */
2654 offset = sizeof(struct padded_vnet_hdr);
2655 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
2656
2657 /* chain first in list head */
2658 first->private = (unsigned long)list;
2659 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
2660 first, gfp);
2661 if (err < 0)
2662 give_pages(rq, first);
2663
2664 return err;
2665 }
2666
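/* Choose the size of the next mergeable receive buffer. With XDP headroom
 * reserved (room != 0) a full page minus that room is used; otherwise the
 * EWMA of recent packet lengths is used, clamped to
 * [rq->min_buf_len, PAGE_SIZE - hdr_len] and aligned to the cache line size.
 */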
2667 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
2668 struct ewma_pkt_len *avg_pkt_len,
2669 unsigned int room)
2670 {
2671 struct virtnet_info *vi = rq->vq->vdev->priv;
2672 const size_t hdr_len = vi->hdr_len;
2673 unsigned int len;
2674
2675 if (room)
2676 return PAGE_SIZE - room;
2677
2678 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
2679 rq->min_buf_len, PAGE_SIZE - hdr_len);
2680
2681 return ALIGN(len, L1_CACHE_BYTES);
2682 }
2683
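/* Post one mergeable receive buffer carved out of the per-queue page_frag.
 * The buffer's truesize and headroom are packed into the opaque ctx cookie
 * (see mergeable_len_to_ctx()) so the receive path can recover them through
 * mergeable_ctx_to_truesize()/mergeable_ctx_to_headroom() without keeping
 * any extra per-buffer state.
 */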
2684 static int add_recvbuf_mergeable(struct virtnet_info *vi,
2685 struct receive_queue *rq, gfp_t gfp)
2686 {
2687 struct page_frag *alloc_frag = &rq->alloc_frag;
2688 unsigned int headroom = virtnet_get_headroom(vi);
2689 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2690 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
2691 unsigned int len, hole;
2692 void *ctx;
2693 char *buf;
2694 int err;
2695
2696 /* Extra tailroom is needed to satisfy XDP's assumption. This
2697 	 * means rx frag coalescing won't work, but considering we've
2698 	 * disabled GSO for XDP, it won't be a big issue.
2699 */
2700 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
2701
2702 if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
2703 return -ENOMEM;
2704
2705 if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size)
2706 len -= sizeof(struct virtnet_rq_dma);
2707
2708 buf = virtnet_rq_alloc(rq, len + room, gfp);
2709 if (unlikely(!buf))
2710 return -ENOMEM;
2711
2712 buf += headroom; /* advance address leaving hole at front of pkt */
2713 hole = alloc_frag->size - alloc_frag->offset;
2714 if (hole < len + room) {
2715 /* To avoid internal fragmentation, if there is very likely not
2716 * enough space for another buffer, add the remaining space to
2717 * the current buffer.
2718 * XDP core assumes that frame_size of xdp_buff and the length
2719 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
2720 */
2721 if (!headroom)
2722 len += hole;
2723 alloc_frag->offset += hole;
2724 }
2725
2726 virtnet_rq_init_one_sg(rq, buf, len);
2727
2728 ctx = mergeable_len_to_ctx(len + room, headroom);
2729 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
2730 if (err < 0) {
2731 virtnet_rq_unmap(rq, buf, 0);
2732 put_page(virt_to_head_page(buf));
2733 }
2734
2735 return err;
2736 }
2737
2738 /*
2739 * Returns false if we couldn't fill entirely (OOM).
2740 *
2741 * Normally run in the receive path, but can also be run from ndo_open
2742 * before we're receiving packets, or from refill_work which is
2743 * careful to disable receiving (using napi_disable).
2744 */
2745 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
2746 gfp_t gfp)
2747 {
2748 int err;
2749
2750 if (rq->xsk_pool) {
2751 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp);
2752 goto kick;
2753 }
2754
2755 do {
2756 if (vi->mergeable_rx_bufs)
2757 err = add_recvbuf_mergeable(vi, rq, gfp);
2758 else if (vi->big_packets)
2759 err = add_recvbuf_big(vi, rq, gfp);
2760 else
2761 err = add_recvbuf_small(vi, rq, gfp);
2762
2763 if (err)
2764 break;
2765 } while (rq->vq->num_free);
2766
2767 kick:
2768 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2769 unsigned long flags;
2770
2771 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2772 u64_stats_inc(&rq->stats.kicks);
2773 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2774 }
2775
2776 return err != -ENOMEM;
2777 }
2778
2779 static void skb_recv_done(struct virtqueue *rvq)
2780 {
2781 struct virtnet_info *vi = rvq->vdev->priv;
2782 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2783
2784 rq->calls++;
2785 virtqueue_napi_schedule(&rq->napi, rvq);
2786 }
2787
2788 static void virtnet_napi_do_enable(struct virtqueue *vq,
2789 struct napi_struct *napi)
2790 {
2791 napi_enable(napi);
2792
2793 	/* If all buffers were filled by the other side before we enabled NAPI,
2794 	 * we won't get another interrupt, so process any outstanding packets now.
2795 	 * Calling local_bh_enable() afterwards triggers softIRQ processing.
2796 */
2797 local_bh_disable();
2798 virtqueue_napi_schedule(napi, vq);
2799 local_bh_enable();
2800 }
2801
2802 static void virtnet_napi_enable(struct receive_queue *rq)
2803 {
2804 struct virtnet_info *vi = rq->vq->vdev->priv;
2805 int qidx = vq2rxq(rq->vq);
2806
2807 virtnet_napi_do_enable(rq->vq, &rq->napi);
2808 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
2809 }
2810
2811 static void virtnet_napi_tx_enable(struct send_queue *sq)
2812 {
2813 struct virtnet_info *vi = sq->vq->vdev->priv;
2814 struct napi_struct *napi = &sq->napi;
2815 int qidx = vq2txq(sq->vq);
2816
2817 if (!napi->weight)
2818 return;
2819
2820 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2821 * enable the feature if this is likely affine with the transmit path.
2822 */
2823 if (!vi->affinity_hint_set) {
2824 napi->weight = 0;
2825 return;
2826 }
2827
2828 virtnet_napi_do_enable(sq->vq, napi);
2829 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
2830 }
2831
2832 static void virtnet_napi_tx_disable(struct send_queue *sq)
2833 {
2834 struct virtnet_info *vi = sq->vq->vdev->priv;
2835 struct napi_struct *napi = &sq->napi;
2836 int qidx = vq2txq(sq->vq);
2837
2838 if (napi->weight) {
2839 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
2840 napi_disable(napi);
2841 }
2842 }
2843
2844 static void virtnet_napi_disable(struct receive_queue *rq)
2845 {
2846 struct virtnet_info *vi = rq->vq->vdev->priv;
2847 struct napi_struct *napi = &rq->napi;
2848 int qidx = vq2rxq(rq->vq);
2849
2850 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
2851 napi_disable(napi);
2852 }
2853
2854 static void refill_work(struct work_struct *work)
2855 {
2856 struct virtnet_info *vi =
2857 container_of(work, struct virtnet_info, refill.work);
2858 bool still_empty;
2859 int i;
2860
2861 for (i = 0; i < vi->curr_queue_pairs; i++) {
2862 struct receive_queue *rq = &vi->rq[i];
2863
2864 /*
2865 * When queue API support is added in the future and the call
2866 * below becomes napi_disable_locked, this driver will need to
2867 * be refactored.
2868 *
2869 * One possible solution would be to:
2870 * - cancel refill_work with cancel_delayed_work (note:
2871 * non-sync)
2872 * - cancel refill_work with cancel_delayed_work_sync in
2873 * virtnet_remove after the netdev is unregistered
2874 * - wrap all of the work in a lock (perhaps the netdev
2875 * instance lock)
2876 * - check netif_running() and return early to avoid a race
2877 */
2878 napi_disable(&rq->napi);
2879 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2880 virtnet_napi_do_enable(rq->vq, &rq->napi);
2881
2882 		/* In theory, this can happen: if we don't get any buffers in,
2883 		 * we will *never* try to fill again.
2884 */
2885 if (still_empty)
2886 schedule_delayed_work(&vi->refill, HZ/2);
2887 }
2888 }
2889
2890 static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
2891 struct receive_queue *rq,
2892 int budget,
2893 unsigned int *xdp_xmit,
2894 struct virtnet_rq_stats *stats)
2895 {
2896 unsigned int len;
2897 int packets = 0;
2898 void *buf;
2899
2900 while (packets < budget) {
2901 buf = virtqueue_get_buf(rq->vq, &len);
2902 if (!buf)
2903 break;
2904
2905 virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats);
2906 packets++;
2907 }
2908
2909 return packets;
2910 }
2911
2912 static int virtnet_receive_packets(struct virtnet_info *vi,
2913 struct receive_queue *rq,
2914 int budget,
2915 unsigned int *xdp_xmit,
2916 struct virtnet_rq_stats *stats)
2917 {
2918 unsigned int len;
2919 int packets = 0;
2920 void *buf;
2921
2922 if (!vi->big_packets || vi->mergeable_rx_bufs) {
2923 void *ctx;
2924 while (packets < budget &&
2925 (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2926 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats);
2927 packets++;
2928 }
2929 } else {
2930 while (packets < budget &&
2931 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
2932 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
2933 packets++;
2934 }
2935 }
2936
2937 return packets;
2938 }
2939
2940 static int virtnet_receive(struct receive_queue *rq, int budget,
2941 unsigned int *xdp_xmit)
2942 {
2943 struct virtnet_info *vi = rq->vq->vdev->priv;
2944 struct virtnet_rq_stats stats = {};
2945 int i, packets;
2946
2947 if (rq->xsk_pool)
2948 packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats);
2949 else
2950 packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
2951
2952 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2953 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2954 spin_lock(&vi->refill_lock);
2955 if (vi->refill_enabled)
2956 schedule_delayed_work(&vi->refill, 0);
2957 spin_unlock(&vi->refill_lock);
2958 }
2959 }
2960
2961 u64_stats_set(&stats.packets, packets);
2962 u64_stats_update_begin(&rq->stats.syncp);
2963 for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
2964 size_t offset = virtnet_rq_stats_desc[i].offset;
2965 u64_stats_t *item, *src;
2966
2967 item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2968 src = (u64_stats_t *)((u8 *)&stats + offset);
2969 u64_stats_add(item, u64_stats_read(src));
2970 }
2971
2972 u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets));
2973 u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes));
2974
2975 u64_stats_update_end(&rq->stats.syncp);
2976
2977 return packets;
2978 }
2979
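/* Opportunistically reclaim completed tx buffers of the send queue paired
 * with this receive queue from rx NAPI context. Skipped for XDP queues and
 * when tx NAPI is not in use; the netdev tx queue is woken once enough
 * descriptors are free again.
 */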
2980 static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
2981 {
2982 struct virtnet_info *vi = rq->vq->vdev->priv;
2983 unsigned int index = vq2rxq(rq->vq);
2984 struct send_queue *sq = &vi->sq[index];
2985 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2986
2987 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2988 return;
2989
2990 if (__netif_tx_trylock(txq)) {
2991 if (sq->reset) {
2992 __netif_tx_unlock(txq);
2993 return;
2994 }
2995
2996 do {
2997 virtqueue_disable_cb(sq->vq);
2998 free_old_xmit(sq, txq, !!budget);
2999 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
3000
3001 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
3002 if (netif_tx_queue_stopped(txq)) {
3003 u64_stats_update_begin(&sq->stats.syncp);
3004 u64_stats_inc(&sq->stats.wake);
3005 u64_stats_update_end(&sq->stats.syncp);
3006 }
3007 netif_tx_wake_queue(txq);
3008 }
3009
3010 __netif_tx_unlock(txq);
3011 }
3012 }
3013
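/* Feed one sample (interrupt calls, packets, bytes) into the net_dim
 * library. If DIM decides the rx coalescing profile should change, it
 * schedules work that virtnet_rx_dim_work() later uses to apply the new
 * moderation parameters to the device.
 */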
3014 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
3015 {
3016 struct dim_sample cur_sample = {};
3017
3018 if (!rq->packets_in_napi)
3019 return;
3020
3021 	/* No protection is needed when fetching stats, since the fetcher and
3022 	 * updater of the stats run in the same context.
3023 */
3024 dim_update_sample(rq->calls,
3025 u64_stats_read(&rq->stats.packets),
3026 u64_stats_read(&rq->stats.bytes),
3027 &cur_sample);
3028
3029 net_dim(&rq->dim, &cur_sample);
3030 rq->packets_in_napi = 0;
3031 }
3032
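/* RX NAPI handler: reclaim tx completions for the paired send queue, receive
 * up to budget packets, flush any XDP_REDIRECTed frames, and only when the
 * budget was not exhausted complete NAPI and update DIM. If XDP_TX frames
 * were queued during the poll, kick the XDP send queue once at the end.
 */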
3033 static int virtnet_poll(struct napi_struct *napi, int budget)
3034 {
3035 struct receive_queue *rq =
3036 container_of(napi, struct receive_queue, napi);
3037 struct virtnet_info *vi = rq->vq->vdev->priv;
3038 struct send_queue *sq;
3039 unsigned int received;
3040 unsigned int xdp_xmit = 0;
3041 bool napi_complete;
3042
3043 virtnet_poll_cleantx(rq, budget);
3044
3045 received = virtnet_receive(rq, budget, &xdp_xmit);
3046 rq->packets_in_napi += received;
3047
3048 if (xdp_xmit & VIRTIO_XDP_REDIR)
3049 xdp_do_flush();
3050
3051 /* Out of packets? */
3052 if (received < budget) {
3053 napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
3054 /* Intentionally not taking dim_lock here. This may result in a
3055 * spurious net_dim call. But if that happens virtnet_rx_dim_work
3056 * will not act on the scheduled work.
3057 */
3058 if (napi_complete && rq->dim_enabled)
3059 virtnet_rx_dim_update(vi, rq);
3060 }
3061
3062 if (xdp_xmit & VIRTIO_XDP_TX) {
3063 sq = virtnet_xdp_get_sq(vi);
3064 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
3065 u64_stats_update_begin(&sq->stats.syncp);
3066 u64_stats_inc(&sq->stats.kicks);
3067 u64_stats_update_end(&sq->stats.syncp);
3068 }
3069 virtnet_xdp_put_sq(vi, sq);
3070 }
3071
3072 return received;
3073 }
3074
3075 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
3076 {
3077 virtnet_napi_tx_disable(&vi->sq[qp_index]);
3078 virtnet_napi_disable(&vi->rq[qp_index]);
3079 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
3080 }
3081
3082 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
3083 {
3084 struct net_device *dev = vi->dev;
3085 int err;
3086
3087 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
3088 vi->rq[qp_index].napi.napi_id);
3089 if (err < 0)
3090 return err;
3091
3092 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
3093 MEM_TYPE_PAGE_SHARED, NULL);
3094 if (err < 0)
3095 goto err_xdp_reg_mem_model;
3096
3097 virtnet_napi_enable(&vi->rq[qp_index]);
3098 virtnet_napi_tx_enable(&vi->sq[qp_index]);
3099
3100 return 0;
3101
3102 err_xdp_reg_mem_model:
3103 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
3104 return err;
3105 }
3106
3107 static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim)
3108 {
3109 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3110 return;
3111 net_dim_work_cancel(dim);
3112 }
3113
3114 static void virtnet_update_settings(struct virtnet_info *vi)
3115 {
3116 u32 speed;
3117 u8 duplex;
3118
3119 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3120 return;
3121
3122 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3123
3124 if (ethtool_validate_speed(speed))
3125 vi->speed = speed;
3126
3127 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3128
3129 if (ethtool_validate_duplex(duplex))
3130 vi->duplex = duplex;
3131 }
3132
3133 static int virtnet_open(struct net_device *dev)
3134 {
3135 struct virtnet_info *vi = netdev_priv(dev);
3136 int i, err;
3137
3138 enable_delayed_refill(vi);
3139
3140 for (i = 0; i < vi->max_queue_pairs; i++) {
3141 if (i < vi->curr_queue_pairs)
3142 /* Make sure we have some buffers: if oom use wq. */
3143 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
3144 schedule_delayed_work(&vi->refill, 0);
3145
3146 err = virtnet_enable_queue_pair(vi, i);
3147 if (err < 0)
3148 goto err_enable_qp;
3149 }
3150
3151 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3152 if (vi->status & VIRTIO_NET_S_LINK_UP)
3153 netif_carrier_on(vi->dev);
3154 virtio_config_driver_enable(vi->vdev);
3155 } else {
3156 vi->status = VIRTIO_NET_S_LINK_UP;
3157 netif_carrier_on(dev);
3158 }
3159
3160 return 0;
3161
3162 err_enable_qp:
3163 disable_delayed_refill(vi);
3164 cancel_delayed_work_sync(&vi->refill);
3165
3166 for (i--; i >= 0; i--) {
3167 virtnet_disable_queue_pair(vi, i);
3168 virtnet_cancel_dim(vi, &vi->rq[i].dim);
3169 }
3170
3171 return err;
3172 }
3173
3174 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
3175 {
3176 struct send_queue *sq = container_of(napi, struct send_queue, napi);
3177 struct virtnet_info *vi = sq->vq->vdev->priv;
3178 unsigned int index = vq2txq(sq->vq);
3179 struct netdev_queue *txq;
3180 int opaque, xsk_done = 0;
3181 bool done;
3182
3183 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
3184 /* We don't need to enable cb for XDP */
3185 napi_complete_done(napi, 0);
3186 return 0;
3187 }
3188
3189 txq = netdev_get_tx_queue(vi->dev, index);
3190 __netif_tx_lock(txq, raw_smp_processor_id());
3191 virtqueue_disable_cb(sq->vq);
3192
3193 if (sq->xsk_pool)
3194 xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget);
3195 else
3196 free_old_xmit(sq, txq, !!budget);
3197
3198 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
3199 if (netif_tx_queue_stopped(txq)) {
3200 u64_stats_update_begin(&sq->stats.syncp);
3201 u64_stats_inc(&sq->stats.wake);
3202 u64_stats_update_end(&sq->stats.syncp);
3203 }
3204 netif_tx_wake_queue(txq);
3205 }
3206
3207 if (xsk_done >= budget) {
3208 __netif_tx_unlock(txq);
3209 return budget;
3210 }
3211
3212 opaque = virtqueue_enable_cb_prepare(sq->vq);
3213
3214 done = napi_complete_done(napi, 0);
3215
3216 if (!done)
3217 virtqueue_disable_cb(sq->vq);
3218
3219 __netif_tx_unlock(txq);
3220
3221 if (done) {
3222 if (unlikely(virtqueue_poll(sq->vq, opaque))) {
3223 if (napi_schedule_prep(napi)) {
3224 __netif_tx_lock(txq, raw_smp_processor_id());
3225 virtqueue_disable_cb(sq->vq);
3226 __netif_tx_unlock(txq);
3227 __napi_schedule(napi);
3228 }
3229 }
3230 }
3231
3232 return 0;
3233 }
3234
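/* Queue one skb on the send virtqueue. With VIRTIO_F_ANY_LAYOUT
 * (vi->any_header_sg) and enough spare headroom, the virtio-net header is
 * pushed in front of the packet data so header and payload share a single
 * sg entry; otherwise the header stays in the skb's control buffer and gets
 * its own sg entry.
 */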
3235 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
3236 {
3237 struct virtio_net_hdr_mrg_rxbuf *hdr;
3238 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
3239 struct virtnet_info *vi = sq->vq->vdev->priv;
3240 int num_sg;
3241 unsigned hdr_len = vi->hdr_len;
3242 bool can_push;
3243
3244 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
3245
3246 can_push = vi->any_header_sg &&
3247 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
3248 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
3249 /* Even if we can, don't push here yet as this would skew
3250 * csum_start offset below. */
3251 if (can_push)
3252 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
3253 else
3254 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
3255
3256 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
3257 virtio_is_little_endian(vi->vdev), false,
3258 0))
3259 return -EPROTO;
3260
3261 if (vi->mergeable_rx_bufs)
3262 hdr->num_buffers = 0;
3263
3264 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
3265 if (can_push) {
3266 __skb_push(skb, hdr_len);
3267 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
3268 if (unlikely(num_sg < 0))
3269 return num_sg;
3270 /* Pull header back to avoid skew in tx bytes calculations. */
3271 __skb_pull(skb, hdr_len);
3272 } else {
3273 sg_set_buf(sq->sg, hdr, hdr_len);
3274 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
3275 if (unlikely(num_sg < 0))
3276 return num_sg;
3277 num_sg++;
3278 }
3279
3280 return virtnet_add_outbuf(sq, num_sg, skb,
3281 orphan ? VIRTNET_XMIT_TYPE_SKB_ORPHAN : VIRTNET_XMIT_TYPE_SKB);
3282 }
3283
3284 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
3285 {
3286 struct virtnet_info *vi = netdev_priv(dev);
3287 int qnum = skb_get_queue_mapping(skb);
3288 struct send_queue *sq = &vi->sq[qnum];
3289 int err;
3290 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
3291 bool xmit_more = netdev_xmit_more();
3292 bool use_napi = sq->napi.weight;
3293 bool kick;
3294
3295 if (!use_napi)
3296 free_old_xmit(sq, txq, false);
3297 else
3298 virtqueue_disable_cb(sq->vq);
3299
3300 /* timestamp packet in software */
3301 skb_tx_timestamp(skb);
3302
3303 /* Try to transmit */
3304 err = xmit_skb(sq, skb, !use_napi);
3305
3306 /* This should not happen! */
3307 if (unlikely(err)) {
3308 DEV_STATS_INC(dev, tx_fifo_errors);
3309 if (net_ratelimit())
3310 dev_warn(&dev->dev,
3311 "Unexpected TXQ (%d) queue failure: %d\n",
3312 qnum, err);
3313 DEV_STATS_INC(dev, tx_dropped);
3314 dev_kfree_skb_any(skb);
3315 return NETDEV_TX_OK;
3316 }
3317
3318 /* Don't wait up for transmitted skbs to be freed. */
3319 if (!use_napi) {
3320 skb_orphan(skb);
3321 nf_reset_ct(skb);
3322 }
3323
3324 if (use_napi)
3325 tx_may_stop(vi, dev, sq);
3326 else
3327 		check_sq_full_and_disable(vi, dev, sq);
3328
3329 kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
3330 !xmit_more || netif_xmit_stopped(txq);
3331 if (kick) {
3332 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
3333 u64_stats_update_begin(&sq->stats.syncp);
3334 u64_stats_inc(&sq->stats.kicks);
3335 u64_stats_update_end(&sq->stats.syncp);
3336 }
3337 }
3338
3339 if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
3340 virtqueue_napi_schedule(&sq->napi, sq->vq);
3341
3342 return NETDEV_TX_OK;
3343 }
3344
3345 static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
3346 {
3347 bool running = netif_running(vi->dev);
3348
3349 if (running) {
3350 virtnet_napi_disable(rq);
3351 virtnet_cancel_dim(vi, &rq->dim);
3352 }
3353 }
3354
3355 static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
3356 {
3357 bool running = netif_running(vi->dev);
3358
3359 if (!try_fill_recv(vi, rq, GFP_KERNEL))
3360 schedule_delayed_work(&vi->refill, 0);
3361
3362 if (running)
3363 virtnet_napi_enable(rq);
3364 }
3365
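/* Resize an rx ring: pause the queue (disable NAPI and cancel DIM work),
 * call virtqueue_resize() with a callback that unmaps and frees any buffer
 * still left in the old ring, then resume the queue and refill it.
 */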
3366 static int virtnet_rx_resize(struct virtnet_info *vi,
3367 struct receive_queue *rq, u32 ring_num)
3368 {
3369 int err, qindex;
3370
3371 qindex = rq - vi->rq;
3372
3373 virtnet_rx_pause(vi, rq);
3374
3375 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL);
3376 if (err)
3377 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
3378
3379 virtnet_rx_resume(vi, rq);
3380 return err;
3381 }
3382
3383 static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
3384 {
3385 bool running = netif_running(vi->dev);
3386 struct netdev_queue *txq;
3387 int qindex;
3388
3389 qindex = sq - vi->sq;
3390
3391 if (running)
3392 virtnet_napi_tx_disable(sq);
3393
3394 txq = netdev_get_tx_queue(vi->dev, qindex);
3395
3396 	/* 1. wait for all in-flight xmit to complete
3397 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
3398 */
3399 __netif_tx_lock_bh(txq);
3400
3401 /* Prevent rx poll from accessing sq. */
3402 sq->reset = true;
3403
3404 /* Prevent the upper layer from trying to send packets. */
3405 netif_stop_subqueue(vi->dev, qindex);
3406
3407 __netif_tx_unlock_bh(txq);
3408 }
3409
3410 static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
3411 {
3412 bool running = netif_running(vi->dev);
3413 struct netdev_queue *txq;
3414 int qindex;
3415
3416 qindex = sq - vi->sq;
3417
3418 txq = netdev_get_tx_queue(vi->dev, qindex);
3419
3420 __netif_tx_lock_bh(txq);
3421 sq->reset = false;
3422 netif_tx_wake_queue(txq);
3423 __netif_tx_unlock_bh(txq);
3424
3425 if (running)
3426 virtnet_napi_tx_enable(sq);
3427 }
3428
3429 static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
3430 u32 ring_num)
3431 {
3432 int qindex, err;
3433
3434 qindex = sq - vi->sq;
3435
3436 virtnet_tx_pause(vi, sq);
3437
3438 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf,
3439 virtnet_sq_free_unused_buf_done);
3440 if (err)
3441 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
3442
3443 virtnet_tx_resume(vi, sq);
3444
3445 return err;
3446 }
3447
3448 /*
3449 * Send command via the control virtqueue and check status. Commands
3450 * supported by the hypervisor, as indicated by feature bits, should
3451 * never fail unless improperly formatted.
3452 */
3453 static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd,
3454 struct scatterlist *out,
3455 struct scatterlist *in)
3456 {
3457 struct scatterlist *sgs[5], hdr, stat;
3458 u32 out_num = 0, tmp, in_num = 0;
3459 bool ok;
3460 int ret;
3461
3462 /* Caller should know better */
3463 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
3464
3465 mutex_lock(&vi->cvq_lock);
3466 vi->ctrl->status = ~0;
3467 vi->ctrl->hdr.class = class;
3468 vi->ctrl->hdr.cmd = cmd;
3469 /* Add header */
3470 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
3471 sgs[out_num++] = &hdr;
3472
3473 if (out)
3474 sgs[out_num++] = out;
3475
3476 /* Add return status. */
3477 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
3478 sgs[out_num + in_num++] = &stat;
3479
3480 if (in)
3481 sgs[out_num + in_num++] = in;
3482
3483 BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
3484 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
3485 if (ret < 0) {
3486 dev_warn(&vi->vdev->dev,
3487 			 "Failed to add sgs for command vq: %d.\n", ret);
3488 mutex_unlock(&vi->cvq_lock);
3489 return false;
3490 }
3491
3492 if (unlikely(!virtqueue_kick(vi->cvq)))
3493 goto unlock;
3494
3495 /* Spin for a response, the kick causes an ioport write, trapping
3496 * into the hypervisor, so the request should be handled immediately.
3497 */
3498 while (!virtqueue_get_buf(vi->cvq, &tmp) &&
3499 !virtqueue_is_broken(vi->cvq)) {
3500 cond_resched();
3501 cpu_relax();
3502 }
3503
3504 unlock:
3505 ok = vi->ctrl->status == VIRTIO_NET_OK;
3506 mutex_unlock(&vi->cvq_lock);
3507 return ok;
3508 }
3509
3510 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
3511 struct scatterlist *out)
3512 {
3513 return virtnet_send_command_reply(vi, class, cmd, out, NULL);
3514 }
3515
3516 static int virtnet_set_mac_address(struct net_device *dev, void *p)
3517 {
3518 struct virtnet_info *vi = netdev_priv(dev);
3519 struct virtio_device *vdev = vi->vdev;
3520 int ret;
3521 struct sockaddr *addr;
3522 struct scatterlist sg;
3523
3524 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
3525 return -EOPNOTSUPP;
3526
3527 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
3528 if (!addr)
3529 return -ENOMEM;
3530
3531 ret = eth_prepare_mac_addr_change(dev, addr);
3532 if (ret)
3533 goto out;
3534
3535 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
3536 sg_init_one(&sg, addr->sa_data, dev->addr_len);
3537 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
3538 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
3539 dev_warn(&vdev->dev,
3540 "Failed to set mac address by vq command.\n");
3541 ret = -EINVAL;
3542 goto out;
3543 }
3544 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
3545 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3546 unsigned int i;
3547
3548 /* Naturally, this has an atomicity problem. */
3549 for (i = 0; i < dev->addr_len; i++)
3550 virtio_cwrite8(vdev,
3551 offsetof(struct virtio_net_config, mac) +
3552 i, addr->sa_data[i]);
3553 }
3554
3555 eth_commit_mac_addr_change(dev, p);
3556 ret = 0;
3557
3558 out:
3559 kfree(addr);
3560 return ret;
3561 }
3562
3563 static void virtnet_stats(struct net_device *dev,
3564 struct rtnl_link_stats64 *tot)
3565 {
3566 struct virtnet_info *vi = netdev_priv(dev);
3567 unsigned int start;
3568 int i;
3569
3570 for (i = 0; i < vi->max_queue_pairs; i++) {
3571 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
3572 struct receive_queue *rq = &vi->rq[i];
3573 struct send_queue *sq = &vi->sq[i];
3574
3575 do {
3576 start = u64_stats_fetch_begin(&sq->stats.syncp);
3577 tpackets = u64_stats_read(&sq->stats.packets);
3578 tbytes = u64_stats_read(&sq->stats.bytes);
3579 terrors = u64_stats_read(&sq->stats.tx_timeouts);
3580 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3581
3582 do {
3583 start = u64_stats_fetch_begin(&rq->stats.syncp);
3584 rpackets = u64_stats_read(&rq->stats.packets);
3585 rbytes = u64_stats_read(&rq->stats.bytes);
3586 rdrops = u64_stats_read(&rq->stats.drops);
3587 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3588
3589 tot->rx_packets += rpackets;
3590 tot->tx_packets += tpackets;
3591 tot->rx_bytes += rbytes;
3592 tot->tx_bytes += tbytes;
3593 tot->rx_dropped += rdrops;
3594 tot->tx_errors += terrors;
3595 }
3596
3597 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
3598 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
3599 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
3600 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
3601 }
3602
3603 static void virtnet_ack_link_announce(struct virtnet_info *vi)
3604 {
3605 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
3606 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
3607 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
3608 }
3609
3610 static bool virtnet_commit_rss_command(struct virtnet_info *vi);
3611
3612 static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs)
3613 {
3614 u32 indir_val = 0;
3615 int i = 0;
3616
3617 for (; i < vi->rss_indir_table_size; ++i) {
3618 indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
3619 vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val);
3620 }
3621 vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs);
3622 }
3623
3624 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
3625 {
3626 struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
3627 struct virtio_net_rss_config_hdr *old_rss_hdr;
3628 struct virtio_net_rss_config_trailer old_rss_trailer;
3629 struct net_device *dev = vi->dev;
3630 struct scatterlist sg;
3631
3632 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
3633 return 0;
3634
3635 	/* First check whether we need to update RSS. Do so only if both (1) RSS is enabled and
3636 	 * (2) there is no user configuration.
3637 	 *
3638 	 * During RSS command processing, the device updates queue_pairs using rss.max_tx_vq. That is,
3639 	 * the device updates queue_pairs together with RSS, so we can skip the separate queue_pairs
3640 	 * update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
3641 */
3642 if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
3643 old_rss_hdr = vi->rss_hdr;
3644 old_rss_trailer = vi->rss_trailer;
3645 vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
3646 if (!vi->rss_hdr) {
3647 vi->rss_hdr = old_rss_hdr;
3648 return -ENOMEM;
3649 }
3650
3651 *vi->rss_hdr = *old_rss_hdr;
3652 virtnet_rss_update_by_qpairs(vi, queue_pairs);
3653
3654 if (!virtnet_commit_rss_command(vi)) {
3655 /* restore ctrl_rss if commit_rss_command failed */
3656 devm_kfree(&dev->dev, vi->rss_hdr);
3657 vi->rss_hdr = old_rss_hdr;
3658 vi->rss_trailer = old_rss_trailer;
3659
3660 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n",
3661 queue_pairs);
3662 return -EINVAL;
3663 }
3664 devm_kfree(&dev->dev, old_rss_hdr);
3665 goto succ;
3666 }
3667
3668 mq = kzalloc(sizeof(*mq), GFP_KERNEL);
3669 if (!mq)
3670 return -ENOMEM;
3671
3672 mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
3673 sg_init_one(&sg, mq, sizeof(*mq));
3674
3675 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3676 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
3677 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
3678 queue_pairs);
3679 return -EINVAL;
3680 }
3681 succ:
3682 vi->curr_queue_pairs = queue_pairs;
3683 /* virtnet_open() will refill when the device is brought up. */
3684 if (dev->flags & IFF_UP)
3685 schedule_delayed_work(&vi->refill, 0);
3686
3687 return 0;
3688 }
3689
3690 static int virtnet_close(struct net_device *dev)
3691 {
3692 struct virtnet_info *vi = netdev_priv(dev);
3693 int i;
3694
3695 /* Make sure NAPI doesn't schedule refill work */
3696 disable_delayed_refill(vi);
3697 /* Make sure refill_work doesn't re-enable napi! */
3698 cancel_delayed_work_sync(&vi->refill);
3699 /* Prevent the config change callback from changing carrier
3700 * after close
3701 */
3702 virtio_config_driver_disable(vi->vdev);
3703 /* Stop getting status/speed updates: we don't care until next
3704 * open
3705 */
3706 cancel_work_sync(&vi->config_work);
3707
3708 for (i = 0; i < vi->max_queue_pairs; i++) {
3709 virtnet_disable_queue_pair(vi, i);
3710 virtnet_cancel_dim(vi, &vi->rq[i].dim);
3711 }
3712
3713 netif_carrier_off(dev);
3714
3715 return 0;
3716 }
3717
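/* Rebuild the device MAC filter table from a workqueue: the unicast list
 * (count + addresses) is packed at the front of a single buffer, the
 * multicast list immediately after it, and both are handed to the device
 * as a two-entry scatterlist in one VIRTIO_NET_CTRL_MAC_TABLE_SET command.
 */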
3718 static void virtnet_rx_mode_work(struct work_struct *work)
3719 {
3720 struct virtnet_info *vi =
3721 container_of(work, struct virtnet_info, rx_mode_work);
3722 u8 *promisc_allmulti __free(kfree) = NULL;
3723 struct net_device *dev = vi->dev;
3724 struct scatterlist sg[2];
3725 struct virtio_net_ctrl_mac *mac_data;
3726 struct netdev_hw_addr *ha;
3727 int uc_count;
3728 int mc_count;
3729 void *buf;
3730 int i;
3731
3732 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
3733 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
3734 return;
3735
3736 promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_KERNEL);
3737 if (!promisc_allmulti) {
3738 dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
3739 return;
3740 }
3741
3742 rtnl_lock();
3743
3744 *promisc_allmulti = !!(dev->flags & IFF_PROMISC);
3745 sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
3746
3747 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
3748 VIRTIO_NET_CTRL_RX_PROMISC, sg))
3749 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
3750 *promisc_allmulti ? "en" : "dis");
3751
3752 *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
3753 sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
3754
3755 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
3756 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
3757 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
3758 *promisc_allmulti ? "en" : "dis");
3759
3760 netif_addr_lock_bh(dev);
3761
3762 uc_count = netdev_uc_count(dev);
3763 mc_count = netdev_mc_count(dev);
3764 /* MAC filter - use one buffer for both lists */
3765 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
3766 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
3767 mac_data = buf;
3768 if (!buf) {
3769 netif_addr_unlock_bh(dev);
3770 rtnl_unlock();
3771 return;
3772 }
3773
3774 sg_init_table(sg, 2);
3775
3776 /* Store the unicast list and count in the front of the buffer */
3777 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
3778 i = 0;
3779 netdev_for_each_uc_addr(ha, dev)
3780 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
3781
3782 sg_set_buf(&sg[0], mac_data,
3783 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
3784
3785 /* multicast list and count fill the end */
3786 mac_data = (void *)&mac_data->macs[uc_count][0];
3787
3788 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
3789 i = 0;
3790 netdev_for_each_mc_addr(ha, dev)
3791 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
3792
3793 netif_addr_unlock_bh(dev);
3794
3795 sg_set_buf(&sg[1], mac_data,
3796 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
3797
3798 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
3799 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
3800 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
3801
3802 rtnl_unlock();
3803
3804 kfree(buf);
3805 }
3806
3807 static void virtnet_set_rx_mode(struct net_device *dev)
3808 {
3809 struct virtnet_info *vi = netdev_priv(dev);
3810
3811 if (vi->rx_mode_work_enabled)
3812 schedule_work(&vi->rx_mode_work);
3813 }
3814
3815 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
3816 __be16 proto, u16 vid)
3817 {
3818 struct virtnet_info *vi = netdev_priv(dev);
3819 __virtio16 *_vid __free(kfree) = NULL;
3820 struct scatterlist sg;
3821
3822 _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
3823 if (!_vid)
3824 return -ENOMEM;
3825
3826 *_vid = cpu_to_virtio16(vi->vdev, vid);
3827 sg_init_one(&sg, _vid, sizeof(*_vid));
3828
3829 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3830 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
3831 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
3832 return 0;
3833 }
3834
3835 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
3836 __be16 proto, u16 vid)
3837 {
3838 struct virtnet_info *vi = netdev_priv(dev);
3839 __virtio16 *_vid __free(kfree) = NULL;
3840 struct scatterlist sg;
3841
3842 _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
3843 if (!_vid)
3844 return -ENOMEM;
3845
3846 *_vid = cpu_to_virtio16(vi->vdev, vid);
3847 sg_init_one(&sg, _vid, sizeof(*_vid));
3848
3849 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3850 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
3851 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
3852 return 0;
3853 }
3854
3855 static void virtnet_clean_affinity(struct virtnet_info *vi)
3856 {
3857 int i;
3858
3859 if (vi->affinity_hint_set) {
3860 for (i = 0; i < vi->max_queue_pairs; i++) {
3861 virtqueue_set_affinity(vi->rq[i].vq, NULL);
3862 virtqueue_set_affinity(vi->sq[i].vq, NULL);
3863 }
3864
3865 vi->affinity_hint_set = false;
3866 }
3867 }
3868
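/* Distribute online CPUs across the active queue pairs in contiguous
 * groups, pinning each pair's RX/TX virtqueue interrupts and XPS map to
 * its group. Each queue gets 'stride' CPUs and the first 'stragglers'
 * queues get one extra. For example, with 8 online CPUs and 3 queue
 * pairs: stride = 2, stragglers = 2, so the groups are 3, 3 and 2 CPUs.
 */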
3869 static void virtnet_set_affinity(struct virtnet_info *vi)
3870 {
3871 cpumask_var_t mask;
3872 int stragglers;
3873 int group_size;
3874 int i, start = 0, cpu;
3875 int num_cpu;
3876 int stride;
3877
3878 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3879 virtnet_clean_affinity(vi);
3880 return;
3881 }
3882
3883 num_cpu = num_online_cpus();
3884 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
3885 stragglers = num_cpu >= vi->curr_queue_pairs ?
3886 num_cpu % vi->curr_queue_pairs :
3887 0;
3888
3889 for (i = 0; i < vi->curr_queue_pairs; i++) {
3890 group_size = stride + (i < stragglers ? 1 : 0);
3891
3892 for_each_online_cpu_wrap(cpu, start) {
3893 if (!group_size--) {
3894 start = cpu;
3895 break;
3896 }
3897 cpumask_set_cpu(cpu, mask);
3898 }
3899
3900 virtqueue_set_affinity(vi->rq[i].vq, mask);
3901 virtqueue_set_affinity(vi->sq[i].vq, mask);
3902 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
3903 cpumask_clear(mask);
3904 }
3905
3906 vi->affinity_hint_set = true;
3907 free_cpumask_var(mask);
3908 }
3909
3910 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
3911 {
3912 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3913 node);
3914 virtnet_set_affinity(vi);
3915 return 0;
3916 }
3917
3918 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
3919 {
3920 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3921 node_dead);
3922 virtnet_set_affinity(vi);
3923 return 0;
3924 }
3925
3926 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
3927 {
3928 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3929 node);
3930
3931 virtnet_clean_affinity(vi);
3932 return 0;
3933 }
3934
3935 static enum cpuhp_state virtionet_online;
3936
3937 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
3938 {
3939 int ret;
3940
3941 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
3942 if (ret)
3943 return ret;
3944 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
3945 &vi->node_dead);
3946 if (!ret)
3947 return ret;
3948 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
3949 return ret;
3950 }
3951
3952 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
3953 {
3954 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
3955 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
3956 &vi->node_dead);
3957 }
3958
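/* Per-virtqueue coalescing commands take the virtio vq index, not the
 * ethtool queue index: with the usual virtio-net layout, RX queue i maps
 * to vq 2 * i and TX queue i to vq 2 * i + 1 (see rxq2vq()/txq2vq()),
 * which is also why the stats code below divides qid by 2.
 */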
3959 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3960 u16 vqn, u32 max_usecs, u32 max_packets)
3961 {
3962 struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
3963 struct scatterlist sgs;
3964
3965 coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
3966 if (!coal_vq)
3967 return -ENOMEM;
3968
3969 coal_vq->vqn = cpu_to_le16(vqn);
3970 coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
3971 coal_vq->coal.max_packets = cpu_to_le32(max_packets);
3972 sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
3973
3974 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3975 VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
3976 &sgs))
3977 return -EINVAL;
3978
3979 return 0;
3980 }
3981
3982 static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3983 u16 queue, u32 max_usecs,
3984 u32 max_packets)
3985 {
3986 int err;
3987
3988 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3989 return -EOPNOTSUPP;
3990
3991 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
3992 max_usecs, max_packets);
3993 if (err)
3994 return err;
3995
3996 vi->rq[queue].intr_coal.max_usecs = max_usecs;
3997 vi->rq[queue].intr_coal.max_packets = max_packets;
3998
3999 return 0;
4000 }
4001
4002 static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
4003 u16 queue, u32 max_usecs,
4004 u32 max_packets)
4005 {
4006 int err;
4007
4008 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
4009 return -EOPNOTSUPP;
4010
4011 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
4012 max_usecs, max_packets);
4013 if (err)
4014 return err;
4015
4016 vi->sq[queue].intr_coal.max_usecs = max_usecs;
4017 vi->sq[queue].intr_coal.max_packets = max_packets;
4018
4019 return 0;
4020 }
4021
4022 static void virtnet_get_ringparam(struct net_device *dev,
4023 struct ethtool_ringparam *ring,
4024 struct kernel_ethtool_ringparam *kernel_ring,
4025 struct netlink_ext_ack *extack)
4026 {
4027 struct virtnet_info *vi = netdev_priv(dev);
4028
4029 ring->rx_max_pending = vi->rq[0].vq->num_max;
4030 ring->tx_max_pending = vi->sq[0].vq->num_max;
4031 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
4032 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
4033 }
4034
4035 static int virtnet_set_ringparam(struct net_device *dev,
4036 struct ethtool_ringparam *ring,
4037 struct kernel_ethtool_ringparam *kernel_ring,
4038 struct netlink_ext_ack *extack)
4039 {
4040 struct virtnet_info *vi = netdev_priv(dev);
4041 u32 rx_pending, tx_pending;
4042 struct receive_queue *rq;
4043 struct send_queue *sq;
4044 int i, err;
4045
4046 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
4047 return -EINVAL;
4048
4049 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
4050 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
4051
4052 if (ring->rx_pending == rx_pending &&
4053 ring->tx_pending == tx_pending)
4054 return 0;
4055
4056 if (ring->rx_pending > vi->rq[0].vq->num_max)
4057 return -EINVAL;
4058
4059 if (ring->tx_pending > vi->sq[0].vq->num_max)
4060 return -EINVAL;
4061
4062 for (i = 0; i < vi->max_queue_pairs; i++) {
4063 rq = vi->rq + i;
4064 sq = vi->sq + i;
4065
4066 if (ring->tx_pending != tx_pending) {
4067 err = virtnet_tx_resize(vi, sq, ring->tx_pending);
4068 if (err)
4069 return err;
4070
4071 /* Upon disabling and re-enabling a transmit virtqueue, the device must
4072 * set the coalescing parameters of the virtqueue to those configured
4073 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
4074 * did not set any TX coalescing parameters, to 0.
4075 */
4076 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
4077 vi->intr_coal_tx.max_usecs,
4078 vi->intr_coal_tx.max_packets);
4079
4080 /* Don't break the tx resize action if the vq coalescing is not
4081 * supported. The same is true for rx resize below.
4082 */
4083 if (err && err != -EOPNOTSUPP)
4084 return err;
4085 }
4086
4087 if (ring->rx_pending != rx_pending) {
4088 err = virtnet_rx_resize(vi, rq, ring->rx_pending);
4089 if (err)
4090 return err;
4091
4092 /* The reason is the same as for the transmit virtqueue reset */
4093 mutex_lock(&vi->rq[i].dim_lock);
4094 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
4095 vi->intr_coal_rx.max_usecs,
4096 vi->intr_coal_rx.max_packets);
4097 mutex_unlock(&vi->rq[i].dim_lock);
4098 if (err && err != -EOPNOTSUPP)
4099 return err;
4100 }
4101 }
4102
4103 return 0;
4104 }
4105
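/* Commit the current RSS/hash configuration to the device. The command
 * body is split into two scatterlist entries because the key material in
 * the trailer follows a variable-length indirection table in the header.
 */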
4106 static bool virtnet_commit_rss_command(struct virtnet_info *vi)
4107 {
4108 struct net_device *dev = vi->dev;
4109 struct scatterlist sgs[2];
4110
4111 /* prepare sgs */
4112 sg_init_table(sgs, 2);
4113 sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi));
4114 sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi));
4115
4116 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
4117 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
4118 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
4119 goto err;
4120
4121 return true;
4122
4123 err:
4124 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
4125 return false;
4126
4127 }
4128
4129 static void virtnet_init_default_rss(struct virtnet_info *vi)
4130 {
4131 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported);
4132 vi->rss_hash_types_saved = vi->rss_hash_types_supported;
4133 vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size
4134 ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0;
4135 vi->rss_hdr->unclassified_queue = 0;
4136
4137 virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
4138
4139 vi->rss_trailer.hash_key_length = vi->rss_key_size;
4140
4141 netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size);
4142 }
4143
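/* Translate the RSS hash types negotiated with the device into ethtool
 * rx-flow-hash fields: 'sd' means hash on IP source/destination only,
 * 'sdfn' additionally hashes the L4 source/destination ports. For
 * example, "ethtool -N <dev> rx-flow-hash tcp4 sdfn" reaches
 * virtnet_set_hashflow() below (via the driver's set_rxnfc handler) with
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3.
 */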
4144 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
4145 {
4146 info->data = 0;
4147 switch (info->flow_type) {
4148 case TCP_V4_FLOW:
4149 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
4150 info->data = RXH_IP_SRC | RXH_IP_DST |
4151 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4152 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
4153 info->data = RXH_IP_SRC | RXH_IP_DST;
4154 }
4155 break;
4156 case TCP_V6_FLOW:
4157 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
4158 info->data = RXH_IP_SRC | RXH_IP_DST |
4159 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4160 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
4161 info->data = RXH_IP_SRC | RXH_IP_DST;
4162 }
4163 break;
4164 case UDP_V4_FLOW:
4165 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
4166 info->data = RXH_IP_SRC | RXH_IP_DST |
4167 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4168 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
4169 info->data = RXH_IP_SRC | RXH_IP_DST;
4170 }
4171 break;
4172 case UDP_V6_FLOW:
4173 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
4174 info->data = RXH_IP_SRC | RXH_IP_DST |
4175 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4176 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
4177 info->data = RXH_IP_SRC | RXH_IP_DST;
4178 }
4179 break;
4180 case IPV4_FLOW:
4181 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
4182 info->data = RXH_IP_SRC | RXH_IP_DST;
4183
4184 break;
4185 case IPV6_FLOW:
4186 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
4187 info->data = RXH_IP_SRC | RXH_IP_DST;
4188
4189 break;
4190 default:
4191 info->data = 0;
4192 break;
4193 }
4194 }
4195
4196 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
4197 {
4198 u32 new_hashtypes = vi->rss_hash_types_saved;
4199 bool is_disable = info->data & RXH_DISCARD;
4200 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
4201
4202 /* supports only 'sd', 'sdfn' and 'r' */
4203 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
4204 return false;
4205
4206 switch (info->flow_type) {
4207 case TCP_V4_FLOW:
4208 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
4209 if (!is_disable)
4210 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
4211 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
4212 break;
4213 case UDP_V4_FLOW:
4214 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
4215 if (!is_disable)
4216 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
4217 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
4218 break;
4219 case IPV4_FLOW:
4220 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
4221 if (!is_disable)
4222 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
4223 break;
4224 case TCP_V6_FLOW:
4225 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
4226 if (!is_disable)
4227 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
4228 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
4229 break;
4230 case UDP_V6_FLOW:
4231 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
4232 if (!is_disable)
4233 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
4234 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
4235 break;
4236 case IPV6_FLOW:
4237 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
4238 if (!is_disable)
4239 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
4240 break;
4241 default:
4242 /* unsupported flow */
4243 return false;
4244 }
4245
4246 /* if unsupported hashtype was set */
4247 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
4248 return false;
4249
4250 if (new_hashtypes != vi->rss_hash_types_saved) {
4251 vi->rss_hash_types_saved = new_hashtypes;
4252 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
4253 if (vi->dev->features & NETIF_F_RXHASH)
4254 return virtnet_commit_rss_command(vi);
4255 }
4256
4257 return true;
4258 }
4259
4260 static void virtnet_get_drvinfo(struct net_device *dev,
4261 struct ethtool_drvinfo *info)
4262 {
4263 struct virtnet_info *vi = netdev_priv(dev);
4264 struct virtio_device *vdev = vi->vdev;
4265
4266 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
4267 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
4268 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
4269
4270 }
4271
4272 /* TODO: Eliminate OOO packets during switching */
4273 static int virtnet_set_channels(struct net_device *dev,
4274 struct ethtool_channels *channels)
4275 {
4276 struct virtnet_info *vi = netdev_priv(dev);
4277 u16 queue_pairs = channels->combined_count;
4278 int err;
4279
4280 /* We don't support separate rx/tx channels.
4281 * We don't allow setting 'other' channels.
4282 */
4283 if (channels->rx_count || channels->tx_count || channels->other_count)
4284 return -EINVAL;
4285
4286 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
4287 return -EINVAL;
4288
4289 /* For now we don't support modifying channels while XDP is loaded.
4290  * Also, when XDP is loaded, all RX queues have XDP programs, so we only
4291  * need to check a single RX queue.
4292  */
4293 if (vi->rq[0].xdp_prog)
4294 return -EINVAL;
4295
4296 cpus_read_lock();
4297 err = virtnet_set_queues(vi, queue_pairs);
4298 if (err) {
4299 cpus_read_unlock();
4300 goto err;
4301 }
4302 virtnet_set_affinity(vi);
4303 cpus_read_unlock();
4304
4305 netif_set_real_num_tx_queues(dev, queue_pairs);
4306 netif_set_real_num_rx_queues(dev, queue_pairs);
4307 err:
4308 return err;
4309 }
4310
4311 static void virtnet_stats_sprintf(u8 **p, const char *fmt, const char *noq_fmt,
4312 int num, int qid, const struct virtnet_stat_desc *desc)
4313 {
4314 int i;
4315
4316 if (qid < 0) {
4317 for (i = 0; i < num; ++i)
4318 ethtool_sprintf(p, noq_fmt, desc[i].desc);
4319 } else {
4320 for (i = 0; i < num; ++i)
4321 ethtool_sprintf(p, fmt, qid, desc[i].desc);
4322 }
4323 }
4324
4325 /* qid == -1: for rx/tx queue total field */
4326 static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
4327 {
4328 const struct virtnet_stat_desc *desc;
4329 const char *fmt, *noq_fmt;
4330 u8 *p = *data;
4331 u32 num;
4332
4333 if (type == VIRTNET_Q_TYPE_CQ && qid >= 0) {
4334 noq_fmt = "cq_hw_%s";
4335
4336 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
4337 desc = &virtnet_stats_cvq_desc[0];
4338 num = ARRAY_SIZE(virtnet_stats_cvq_desc);
4339
4340 virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc);
4341 }
4342 }
4343
4344 if (type == VIRTNET_Q_TYPE_RX) {
4345 fmt = "rx%u_%s";
4346 noq_fmt = "rx_%s";
4347
4348 desc = &virtnet_rq_stats_desc[0];
4349 num = ARRAY_SIZE(virtnet_rq_stats_desc);
4350
4351 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4352
4353 fmt = "rx%u_hw_%s";
4354 noq_fmt = "rx_hw_%s";
4355
4356 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4357 desc = &virtnet_stats_rx_basic_desc[0];
4358 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4359
4360 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4361 }
4362
4363 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4364 desc = &virtnet_stats_rx_csum_desc[0];
4365 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4366
4367 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4368 }
4369
4370 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4371 desc = &virtnet_stats_rx_speed_desc[0];
4372 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4373
4374 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4375 }
4376 }
4377
4378 if (type == VIRTNET_Q_TYPE_TX) {
4379 fmt = "tx%u_%s";
4380 noq_fmt = "tx_%s";
4381
4382 desc = &virtnet_sq_stats_desc[0];
4383 num = ARRAY_SIZE(virtnet_sq_stats_desc);
4384
4385 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4386
4387 fmt = "tx%u_hw_%s";
4388 noq_fmt = "tx_hw_%s";
4389
4390 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4391 desc = &virtnet_stats_tx_basic_desc[0];
4392 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4393
4394 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4395 }
4396
4397 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4398 desc = &virtnet_stats_tx_gso_desc[0];
4399 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4400
4401 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4402 }
4403
4404 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4405 desc = &virtnet_stats_tx_speed_desc[0];
4406 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4407
4408 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4409 }
4410 }
4411
4412 *data = p;
4413 }
4414
4415 struct virtnet_stats_ctx {
4416 /* The stats are written to qstats or ethtool -S */
4417 bool to_qstat;
4418
4419 /* Used to calculate the offset inside the output buffer. */
4420 u32 desc_num[3];
4421
4422 /* The actual supported stat types. */
4423 u64 bitmap[3];
4424
4425 /* Used to calculate the reply buffer size. */
4426 u32 size[3];
4427
4428 /* Record the output buffer. */
4429 u64 *data;
4430 };
4431
4432 static void virtnet_stats_ctx_init(struct virtnet_info *vi,
4433 struct virtnet_stats_ctx *ctx,
4434 u64 *data, bool to_qstat)
4435 {
4436 u32 queue_type;
4437
4438 ctx->data = data;
4439 ctx->to_qstat = to_qstat;
4440
4441 if (to_qstat) {
4442 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
4443 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
4444
4445 queue_type = VIRTNET_Q_TYPE_RX;
4446
4447 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4448 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
4449 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
4450 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic);
4451 }
4452
4453 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4454 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
4455 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
4456 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum);
4457 }
4458
4459 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
4460 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO;
4461 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
4462 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso);
4463 }
4464
4465 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4466 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
4467 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
4468 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed);
4469 }
4470
4471 queue_type = VIRTNET_Q_TYPE_TX;
4472
4473 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4474 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
4475 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
4476 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic);
4477 }
4478
4479 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
4480 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM;
4481 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
4482 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum);
4483 }
4484
4485 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4486 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO;
4487 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
4488 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso);
4489 }
4490
4491 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4492 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
4493 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
4494 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed);
4495 }
4496
4497 return;
4498 }
4499
4500 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc);
4501 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc);
4502
4503 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
4504 queue_type = VIRTNET_Q_TYPE_CQ;
4505
4506 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ;
4507 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc);
4508 ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq);
4509 }
4510
4511 queue_type = VIRTNET_Q_TYPE_RX;
4512
4513 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4514 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
4515 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4516 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic);
4517 }
4518
4519 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4520 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
4521 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4522 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum);
4523 }
4524
4525 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4526 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
4527 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4528 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed);
4529 }
4530
4531 queue_type = VIRTNET_Q_TYPE_TX;
4532
4533 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4534 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
4535 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4536 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic);
4537 }
4538
4539 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4540 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO;
4541 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4542 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso);
4543 }
4544
4545 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4546 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
4547 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4548 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed);
4549 }
4550 }
4551
4552 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
4553  * @sum: where to store the summed values
4554  * @num: number of fields per queue
4555  * @q_value: pointer to the first queue's fields
4556  * @q_num: number of queues
4557 */
4558 static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num)
4559 {
4560 u32 step = num;
4561 int i, j;
4562 u64 *p;
4563
4564 for (i = 0; i < num; ++i) {
4565 p = sum + i;
4566 *p = 0;
4567
4568 for (j = 0; j < q_num; ++j)
4569 *p += *(q_value + i + j * step);
4570 }
4571 }
4572
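/* The ethtool -S output buffer produced by virtnet_get_ethtool_stats() is
 * laid out as:
 *   [RX totals][TX totals][CQ hw stats][per-RX-queue blocks][per-TX-queue blocks]
 * The per-queue blocks are filled first by virtnet_fill_stats(); this
 * helper then sums them column by column into the leading total fields.
 */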
4573 static void virtnet_fill_total_fields(struct virtnet_info *vi,
4574 struct virtnet_stats_ctx *ctx)
4575 {
4576 u64 *data, *first_rx_q, *first_tx_q;
4577 u32 num_cq, num_rx, num_tx;
4578
4579 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
4580 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
4581 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
4582
4583 first_rx_q = ctx->data + num_rx + num_tx + num_cq;
4584 first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx;
4585
4586 data = ctx->data;
4587
4588 stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs);
4589
4590 data = ctx->data + num_rx;
4591
4592 stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs);
4593 }
4594
4595 static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid,
4596 struct virtnet_stats_ctx *ctx,
4597 const u8 *base, bool drv_stats, u8 reply_type)
4598 {
4599 const struct virtnet_stat_desc *desc;
4600 const u64_stats_t *v_stat;
4601 u64 offset, bitmap;
4602 const __le64 *v;
4603 u32 queue_type;
4604 int i, num;
4605
4606 queue_type = vq_type(vi, qid);
4607 bitmap = ctx->bitmap[queue_type];
4608
4609 if (drv_stats) {
4610 if (queue_type == VIRTNET_Q_TYPE_RX) {
4611 desc = &virtnet_rq_stats_desc_qstat[0];
4612 num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
4613 } else {
4614 desc = &virtnet_sq_stats_desc_qstat[0];
4615 num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
4616 }
4617
4618 for (i = 0; i < num; ++i) {
4619 offset = desc[i].qstat_offset / sizeof(*ctx->data);
4620 v_stat = (const u64_stats_t *)(base + desc[i].offset);
4621 ctx->data[offset] = u64_stats_read(v_stat);
4622 }
4623 return;
4624 }
4625
4626 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4627 desc = &virtnet_stats_rx_basic_desc_qstat[0];
4628 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
4629 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
4630 goto found;
4631 }
4632
4633 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4634 desc = &virtnet_stats_rx_csum_desc_qstat[0];
4635 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
4636 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
4637 goto found;
4638 }
4639
4640 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
4641 desc = &virtnet_stats_rx_gso_desc_qstat[0];
4642 num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
4643 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO)
4644 goto found;
4645 }
4646
4647 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4648 desc = &virtnet_stats_rx_speed_desc_qstat[0];
4649 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
4650 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
4651 goto found;
4652 }
4653
4654 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4655 desc = &virtnet_stats_tx_basic_desc_qstat[0];
4656 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
4657 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
4658 goto found;
4659 }
4660
4661 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
4662 desc = &virtnet_stats_tx_csum_desc_qstat[0];
4663 num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
4664 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM)
4665 goto found;
4666 }
4667
4668 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4669 desc = &virtnet_stats_tx_gso_desc_qstat[0];
4670 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
4671 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
4672 goto found;
4673 }
4674
4675 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4676 desc = &virtnet_stats_tx_speed_desc_qstat[0];
4677 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
4678 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
4679 goto found;
4680 }
4681
4682 return;
4683
4684 found:
4685 for (i = 0; i < num; ++i) {
4686 offset = desc[i].qstat_offset / sizeof(*ctx->data);
4687 v = (const __le64 *)(base + desc[i].offset);
4688 ctx->data[offset] = le64_to_cpu(*v);
4689 }
4690 }
4691
4692 /* virtnet_fill_stats - copy the stats to qstats or ethtool -S
4693 * The stats source is the device or the driver.
4694 *
4695 * @vi: virtio net info
4696 * @qid: the vq id
4697  * @ctx: stats ctx (initialized by virtnet_stats_ctx_init())
4698 * @base: pointer to the device reply or the driver stats structure.
4699 * @drv_stats: designate the base type (device reply, driver stats)
4700  * @reply_type: the type of the device reply (if drv_stats is true, this must be zero)
4701 */
4702 static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
4703 struct virtnet_stats_ctx *ctx,
4704 const u8 *base, bool drv_stats, u8 reply_type)
4705 {
4706 u32 queue_type, num_rx, num_tx, num_cq;
4707 const struct virtnet_stat_desc *desc;
4708 const u64_stats_t *v_stat;
4709 u64 offset, bitmap;
4710 const __le64 *v;
4711 int i, num;
4712
4713 if (ctx->to_qstat)
4714 return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type);
4715
4716 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
4717 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
4718 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
4719
4720 queue_type = vq_type(vi, qid);
4721 bitmap = ctx->bitmap[queue_type];
4722
4723 /* skip the total fields of pairs */
4724 offset = num_rx + num_tx;
4725
4726 if (queue_type == VIRTNET_Q_TYPE_TX) {
4727 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
4728
4729 num = ARRAY_SIZE(virtnet_sq_stats_desc);
4730 if (drv_stats) {
4731 desc = &virtnet_sq_stats_desc[0];
4732 goto drv_stats;
4733 }
4734
4735 offset += num;
4736
4737 } else if (queue_type == VIRTNET_Q_TYPE_RX) {
4738 offset += num_cq + num_rx * (qid / 2);
4739
4740 num = ARRAY_SIZE(virtnet_rq_stats_desc);
4741 if (drv_stats) {
4742 desc = &virtnet_rq_stats_desc[0];
4743 goto drv_stats;
4744 }
4745
4746 offset += num;
4747 }
4748
4749 if (bitmap & VIRTIO_NET_STATS_TYPE_CVQ) {
4750 desc = &virtnet_stats_cvq_desc[0];
4751 num = ARRAY_SIZE(virtnet_stats_cvq_desc);
4752 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_CVQ)
4753 goto found;
4754
4755 offset += num;
4756 }
4757
4758 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4759 desc = &virtnet_stats_rx_basic_desc[0];
4760 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4761 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
4762 goto found;
4763
4764 offset += num;
4765 }
4766
4767 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4768 desc = &virtnet_stats_rx_csum_desc[0];
4769 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4770 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
4771 goto found;
4772
4773 offset += num;
4774 }
4775
4776 if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4777 desc = &virtnet_stats_rx_speed_desc[0];
4778 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4779 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
4780 goto found;
4781
4782 offset += num;
4783 }
4784
4785 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4786 desc = &virtnet_stats_tx_basic_desc[0];
4787 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4788 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
4789 goto found;
4790
4791 offset += num;
4792 }
4793
4794 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4795 desc = &virtnet_stats_tx_gso_desc[0];
4796 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4797 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
4798 goto found;
4799
4800 offset += num;
4801 }
4802
4803 if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4804 desc = &virtnet_stats_tx_speed_desc[0];
4805 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4806 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
4807 goto found;
4808
4809 offset += num;
4810 }
4811
4812 return;
4813
4814 found:
4815 for (i = 0; i < num; ++i) {
4816 v = (const __le64 *)(base + desc[i].offset);
4817 ctx->data[offset + i] = le64_to_cpu(*v);
4818 }
4819
4820 return;
4821
4822 drv_stats:
4823 for (i = 0; i < num; ++i) {
4824 v_stat = (const u64_stats_t *)(base + desc[i].offset);
4825 ctx->data[offset + i] = u64_stats_read(v_stat);
4826 }
4827 }
4828
4829 static int __virtnet_get_hw_stats(struct virtnet_info *vi,
4830 struct virtnet_stats_ctx *ctx,
4831 struct virtio_net_ctrl_queue_stats *req,
4832 int req_size, void *reply, int res_size)
4833 {
4834 struct virtio_net_stats_reply_hdr *hdr;
4835 struct scatterlist sgs_in, sgs_out;
4836 void *p;
4837 u32 qid;
4838 int ok;
4839
4840 sg_init_one(&sgs_out, req, req_size);
4841 sg_init_one(&sgs_in, reply, res_size);
4842
4843 ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
4844 VIRTIO_NET_CTRL_STATS_GET,
4845 &sgs_out, &sgs_in);
4846
4847 if (!ok)
4848 return ok;
4849
4850 for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
4851 hdr = p;
4852 qid = le16_to_cpu(hdr->vq_index);
4853 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
4854 }
4855
4856 return 0;
4857 }
4858
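/* Build one entry of the VIRTIO_NET_CTRL_STATS_GET request: each entry
 * names a vq index plus a bitmap of the stat types wanted for it. The
 * device answers with a stream of (virtio_net_stats_reply_hdr + payload)
 * records that __virtnet_get_hw_stats() above walks using hdr->size.
 */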
4859 static void virtnet_make_stat_req(struct virtnet_info *vi,
4860 struct virtnet_stats_ctx *ctx,
4861 struct virtio_net_ctrl_queue_stats *req,
4862 int qid, int *idx)
4863 {
4864 int qtype = vq_type(vi, qid);
4865 u64 bitmap = ctx->bitmap[qtype];
4866
4867 if (!bitmap)
4868 return;
4869
4870 req->stats[*idx].vq_index = cpu_to_le16(qid);
4871 req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap);
4872 *idx += 1;
4873 }
4874
4875 /* qid: -1: get stats of all vq.
4876 * > 0: get the stats for the special vq. This must not be cvq.
4877 */
4878 static int virtnet_get_hw_stats(struct virtnet_info *vi,
4879 struct virtnet_stats_ctx *ctx, int qid)
4880 {
4881 int qnum, i, j, res_size, qtype, last_vq, first_vq;
4882 struct virtio_net_ctrl_queue_stats *req;
4883 bool enable_cvq;
4884 void *reply;
4885 int ok;
4886
4887 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
4888 return 0;
4889
4890 if (qid == -1) {
4891 last_vq = vi->curr_queue_pairs * 2 - 1;
4892 first_vq = 0;
4893 enable_cvq = true;
4894 } else {
4895 last_vq = qid;
4896 first_vq = qid;
4897 enable_cvq = false;
4898 }
4899
4900 qnum = 0;
4901 res_size = 0;
4902 for (i = first_vq; i <= last_vq ; ++i) {
4903 qtype = vq_type(vi, i);
4904 if (ctx->bitmap[qtype]) {
4905 ++qnum;
4906 res_size += ctx->size[qtype];
4907 }
4908 }
4909
4910 if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
4911 res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
4912 qnum += 1;
4913 }
4914
4915 req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
4916 if (!req)
4917 return -ENOMEM;
4918
4919 reply = kmalloc(res_size, GFP_KERNEL);
4920 if (!reply) {
4921 kfree(req);
4922 return -ENOMEM;
4923 }
4924
4925 j = 0;
4926 for (i = first_vq; i <= last_vq ; ++i)
4927 virtnet_make_stat_req(vi, ctx, req, i, &j);
4928
4929 if (enable_cvq)
4930 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
4931
4932 ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
4933
4934 kfree(req);
4935 kfree(reply);
4936
4937 return ok;
4938 }
4939
4940 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4941 {
4942 struct virtnet_info *vi = netdev_priv(dev);
4943 unsigned int i;
4944 u8 *p = data;
4945
4946 switch (stringset) {
4947 case ETH_SS_STATS:
4948 /* Generate the total field names. */
4949 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p);
4950 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p);
4951
4952 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
4953
4954 for (i = 0; i < vi->curr_queue_pairs; ++i)
4955 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
4956
4957 for (i = 0; i < vi->curr_queue_pairs; ++i)
4958 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
4959 break;
4960 }
4961 }
4962
4963 static int virtnet_get_sset_count(struct net_device *dev, int sset)
4964 {
4965 struct virtnet_info *vi = netdev_priv(dev);
4966 struct virtnet_stats_ctx ctx = {0};
4967 u32 pair_count;
4968
4969 switch (sset) {
4970 case ETH_SS_STATS:
4971 virtnet_stats_ctx_init(vi, &ctx, NULL, false);
4972
4973 pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
4974
4975 return pair_count + ctx.desc_num[VIRTNET_Q_TYPE_CQ] +
4976 vi->curr_queue_pairs * pair_count;
4977 default:
4978 return -EOPNOTSUPP;
4979 }
4980 }
4981
4982 static void virtnet_get_ethtool_stats(struct net_device *dev,
4983 struct ethtool_stats *stats, u64 *data)
4984 {
4985 struct virtnet_info *vi = netdev_priv(dev);
4986 struct virtnet_stats_ctx ctx = {0};
4987 unsigned int start, i;
4988 const u8 *stats_base;
4989
4990 virtnet_stats_ctx_init(vi, &ctx, data, false);
4991 if (virtnet_get_hw_stats(vi, &ctx, -1))
4992 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
4993
4994 for (i = 0; i < vi->curr_queue_pairs; i++) {
4995 struct receive_queue *rq = &vi->rq[i];
4996 struct send_queue *sq = &vi->sq[i];
4997
4998 stats_base = (const u8 *)&rq->stats;
4999 do {
5000 start = u64_stats_fetch_begin(&rq->stats.syncp);
5001 virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0);
5002 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
5003
5004 stats_base = (const u8 *)&sq->stats;
5005 do {
5006 start = u64_stats_fetch_begin(&sq->stats.syncp);
5007 virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
5008 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
5009 }
5010
5011 virtnet_fill_total_fields(vi, &ctx);
5012 }
5013
5014 static void virtnet_get_channels(struct net_device *dev,
5015 struct ethtool_channels *channels)
5016 {
5017 struct virtnet_info *vi = netdev_priv(dev);
5018
5019 channels->combined_count = vi->curr_queue_pairs;
5020 channels->max_combined = vi->max_queue_pairs;
5021 channels->max_other = 0;
5022 channels->rx_count = 0;
5023 channels->tx_count = 0;
5024 channels->other_count = 0;
5025 }
5026
5027 static int virtnet_set_link_ksettings(struct net_device *dev,
5028 const struct ethtool_link_ksettings *cmd)
5029 {
5030 struct virtnet_info *vi = netdev_priv(dev);
5031
5032 return ethtool_virtdev_set_link_ksettings(dev, cmd,
5033 &vi->speed, &vi->duplex);
5034 }
5035
5036 static int virtnet_get_link_ksettings(struct net_device *dev,
5037 struct ethtool_link_ksettings *cmd)
5038 {
5039 struct virtnet_info *vi = netdev_priv(dev);
5040
5041 cmd->base.speed = vi->speed;
5042 cmd->base.duplex = vi->duplex;
5043 cmd->base.port = PORT_OTHER;
5044
5045 return 0;
5046 }
5047
5048 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
5049 struct ethtool_coalesce *ec)
5050 {
5051 struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
5052 struct scatterlist sgs_tx;
5053 int i;
5054
5055 coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
5056 if (!coal_tx)
5057 return -ENOMEM;
5058
5059 coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
5060 coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
5061 sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
5062
5063 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
5064 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
5065 &sgs_tx))
5066 return -EINVAL;
5067
5068 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
5069 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
5070 for (i = 0; i < vi->max_queue_pairs; i++) {
5071 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
5072 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
5073 }
5074
5075 return 0;
5076 }
5077
5078 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
5079 struct ethtool_coalesce *ec)
5080 {
5081 struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
5082 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
5083 struct scatterlist sgs_rx;
5084 int i;
5085
5086 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
5087 return -EOPNOTSUPP;
5088
5089 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
5090 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
5091 return -EINVAL;
5092
5093 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
5094 vi->rx_dim_enabled = true;
5095 for (i = 0; i < vi->max_queue_pairs; i++) {
5096 mutex_lock(&vi->rq[i].dim_lock);
5097 vi->rq[i].dim_enabled = true;
5098 mutex_unlock(&vi->rq[i].dim_lock);
5099 }
5100 return 0;
5101 }
5102
5103 coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
5104 if (!coal_rx)
5105 return -ENOMEM;
5106
5107 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
5108 vi->rx_dim_enabled = false;
5109 for (i = 0; i < vi->max_queue_pairs; i++) {
5110 mutex_lock(&vi->rq[i].dim_lock);
5111 vi->rq[i].dim_enabled = false;
5112 mutex_unlock(&vi->rq[i].dim_lock);
5113 }
5114 }
5115
5116 /* Since the per-queue coalescing params can be set,
5117  * we need to apply the new global params even if they
5118  * are unchanged.
5119  */
5120 coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
5121 coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
5122 sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
5123
5124 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
5125 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
5126 &sgs_rx))
5127 return -EINVAL;
5128
5129 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
5130 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
5131 for (i = 0; i < vi->max_queue_pairs; i++) {
5132 mutex_lock(&vi->rq[i].dim_lock);
5133 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
5134 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
5135 mutex_unlock(&vi->rq[i].dim_lock);
5136 }
5137
5138 return 0;
5139 }
5140
5141 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
5142 struct ethtool_coalesce *ec)
5143 {
5144 int err;
5145
5146 err = virtnet_send_tx_notf_coal_cmds(vi, ec);
5147 if (err)
5148 return err;
5149
5150 err = virtnet_send_rx_notf_coal_cmds(vi, ec);
5151 if (err)
5152 return err;
5153
5154 return 0;
5155 }
5156
5157 static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
5158 struct ethtool_coalesce *ec,
5159 u16 queue)
5160 {
5161 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
5162 u32 max_usecs, max_packets;
5163 bool cur_rx_dim;
5164 int err;
5165
5166 mutex_lock(&vi->rq[queue].dim_lock);
5167 cur_rx_dim = vi->rq[queue].dim_enabled;
5168 max_usecs = vi->rq[queue].intr_coal.max_usecs;
5169 max_packets = vi->rq[queue].intr_coal.max_packets;
5170
5171 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
5172 ec->rx_max_coalesced_frames != max_packets)) {
5173 mutex_unlock(&vi->rq[queue].dim_lock);
5174 return -EINVAL;
5175 }
5176
5177 if (rx_ctrl_dim_on && !cur_rx_dim) {
5178 vi->rq[queue].dim_enabled = true;
5179 mutex_unlock(&vi->rq[queue].dim_lock);
5180 return 0;
5181 }
5182
5183 if (!rx_ctrl_dim_on && cur_rx_dim)
5184 vi->rq[queue].dim_enabled = false;
5185
5186 /* If no params are updated, userspace ethtool will
5187 * reject the modification.
5188 */
5189 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
5190 ec->rx_coalesce_usecs,
5191 ec->rx_max_coalesced_frames);
5192 mutex_unlock(&vi->rq[queue].dim_lock);
5193 return err;
5194 }
5195
5196 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
5197 struct ethtool_coalesce *ec,
5198 u16 queue)
5199 {
5200 int err;
5201
5202 err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
5203 if (err)
5204 return err;
5205
5206 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
5207 ec->tx_coalesce_usecs,
5208 ec->tx_max_coalesced_frames);
5209 if (err)
5210 return err;
5211
5212 return 0;
5213 }
5214
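/* Deferred DIM (dynamic interrupt moderation) worker: net_dim has picked
 * a new RX moderation profile for this queue, so push it to the device
 * with a per-vq coalescing command if it differs from what is currently
 * programmed. This runs from a workqueue because the control vq command
 * can sleep.
 */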
5215 static void virtnet_rx_dim_work(struct work_struct *work)
5216 {
5217 struct dim *dim = container_of(work, struct dim, work);
5218 struct receive_queue *rq = container_of(dim,
5219 struct receive_queue, dim);
5220 struct virtnet_info *vi = rq->vq->vdev->priv;
5221 struct net_device *dev = vi->dev;
5222 struct dim_cq_moder update_moder;
5223 int qnum, err;
5224
5225 qnum = rq - vi->rq;
5226
5227 mutex_lock(&rq->dim_lock);
5228 if (!rq->dim_enabled)
5229 goto out;
5230
5231 update_moder = net_dim_get_rx_irq_moder(dev, dim);
5232 if (update_moder.usec != rq->intr_coal.max_usecs ||
5233 update_moder.pkts != rq->intr_coal.max_packets) {
5234 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
5235 update_moder.usec,
5236 update_moder.pkts);
5237 if (err)
5238 pr_debug("%s: Failed to send dim parameters on rxq%d\n",
5239 dev->name, qnum);
5240 }
5241 out:
5242 dim->state = DIM_START_MEASURE;
5243 mutex_unlock(&rq->dim_lock);
5244 }
5245
5246 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
5247 {
5248 /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
5249 * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
5250 */
5251 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
5252 return -EOPNOTSUPP;
5253
5254 if (ec->tx_max_coalesced_frames > 1 ||
5255 ec->rx_max_coalesced_frames != 1)
5256 return -EINVAL;
5257
5258 return 0;
5259 }
5260
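/* ethtool has no direct knob for TX NAPI, so the driver overloads
 * tx-frames: a non-zero tx_max_coalesced_frames selects NAPI_POLL_WEIGHT
 * (TX NAPI on), zero selects a weight of 0 (TX NAPI off). The weight may
 * only change while the interface is down, hence the -EBUSY below.
 */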
5261 static int virtnet_should_update_vq_weight(int dev_flags, int weight,
5262 int vq_weight, bool *should_update)
5263 {
5264 if (weight ^ vq_weight) {
5265 if (dev_flags & IFF_UP)
5266 return -EBUSY;
5267 *should_update = true;
5268 }
5269
5270 return 0;
5271 }
5272
5273 static int virtnet_set_coalesce(struct net_device *dev,
5274 struct ethtool_coalesce *ec,
5275 struct kernel_ethtool_coalesce *kernel_coal,
5276 struct netlink_ext_ack *extack)
5277 {
5278 struct virtnet_info *vi = netdev_priv(dev);
5279 int ret, queue_number, napi_weight, i;
5280 bool update_napi = false;
5281
5282 /* Can't change NAPI weight if the link is up */
5283 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
5284 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
5285 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
5286 vi->sq[queue_number].napi.weight,
5287 &update_napi);
5288 if (ret)
5289 return ret;
5290
5291 if (update_napi) {
5292 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be
5293 * updated for the sake of simplicity, which might not be necessary
5294 */
5295 break;
5296 }
5297 }
5298
5299 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
5300 ret = virtnet_send_notf_coal_cmds(vi, ec);
5301 else
5302 ret = virtnet_coal_params_supported(ec);
5303
5304 if (ret)
5305 return ret;
5306
5307 if (update_napi) {
5308 /* xsk xmit depends on the tx napi. So if xsk is active,
5309 * prevent modifications to tx napi.
5310 */
5311 for (i = queue_number; i < vi->max_queue_pairs; i++) {
5312 if (vi->sq[i].xsk_pool)
5313 return -EBUSY;
5314 }
5315
5316 for (; queue_number < vi->max_queue_pairs; queue_number++)
5317 vi->sq[queue_number].napi.weight = napi_weight;
5318 }
5319
5320 return ret;
5321 }
5322
5323 static int virtnet_get_coalesce(struct net_device *dev,
5324 struct ethtool_coalesce *ec,
5325 struct kernel_ethtool_coalesce *kernel_coal,
5326 struct netlink_ext_ack *extack)
5327 {
5328 struct virtnet_info *vi = netdev_priv(dev);
5329
5330 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
5331 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
5332 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
5333 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
5334 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
5335 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
5336 } else {
5337 ec->rx_max_coalesced_frames = 1;
5338
5339 if (vi->sq[0].napi.weight)
5340 ec->tx_max_coalesced_frames = 1;
5341 }
5342
5343 return 0;
5344 }
5345
5346 static int virtnet_set_per_queue_coalesce(struct net_device *dev,
5347 u32 queue,
5348 struct ethtool_coalesce *ec)
5349 {
5350 struct virtnet_info *vi = netdev_priv(dev);
5351 int ret, napi_weight;
5352 bool update_napi = false;
5353
5354 if (queue >= vi->max_queue_pairs)
5355 return -EINVAL;
5356
5357 /* Can't change NAPI weight if the link is up */
5358 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
5359 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
5360 vi->sq[queue].napi.weight,
5361 &update_napi);
5362 if (ret)
5363 return ret;
5364
5365 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
5366 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
5367 else
5368 ret = virtnet_coal_params_supported(ec);
5369
5370 if (ret)
5371 return ret;
5372
5373 if (update_napi)
5374 vi->sq[queue].napi.weight = napi_weight;
5375
5376 return 0;
5377 }
5378
5379 static int virtnet_get_per_queue_coalesce(struct net_device *dev,
5380 u32 queue,
5381 struct ethtool_coalesce *ec)
5382 {
5383 struct virtnet_info *vi = netdev_priv(dev);
5384
5385 if (queue >= vi->max_queue_pairs)
5386 return -EINVAL;
5387
5388 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
5389 mutex_lock(&vi->rq[queue].dim_lock);
5390 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
5391 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
5392 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
5393 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
5394 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
5395 mutex_unlock(&vi->rq[queue].dim_lock);
5396 } else {
5397 ec->rx_max_coalesced_frames = 1;
5398
5399 if (vi->sq[queue].napi.weight)
5400 ec->tx_max_coalesced_frames = 1;
5401 }
5402
5403 return 0;
5404 }
5405
5406 static void virtnet_init_settings(struct net_device *dev)
5407 {
5408 struct virtnet_info *vi = netdev_priv(dev);
5409
5410 vi->speed = SPEED_UNKNOWN;
5411 vi->duplex = DUPLEX_UNKNOWN;
5412 }
5413
5414 static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
5415 {
5416 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
5417 }
5418
5419 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
5420 {
5421 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
5422 }
5423
5424 static int virtnet_get_rxfh(struct net_device *dev,
5425 struct ethtool_rxfh_param *rxfh)
5426 {
5427 struct virtnet_info *vi = netdev_priv(dev);
5428 int i;
5429
5430 if (rxfh->indir) {
5431 for (i = 0; i < vi->rss_indir_table_size; ++i)
5432 rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]);
5433 }
5434
5435 if (rxfh->key)
5436 memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size);
5437
5438 rxfh->hfunc = ETH_RSS_HASH_TOP;
5439
5440 return 0;
5441 }
5442
5443 static int virtnet_set_rxfh(struct net_device *dev,
5444 struct ethtool_rxfh_param *rxfh,
5445 struct netlink_ext_ack *extack)
5446 {
5447 struct virtnet_info *vi = netdev_priv(dev);
5448 bool update = false;
5449 int i;
5450
5451 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
5452 rxfh->hfunc != ETH_RSS_HASH_TOP)
5453 return -EOPNOTSUPP;
5454
5455 if (rxfh->indir) {
5456 if (!vi->has_rss)
5457 return -EOPNOTSUPP;
5458
5459 for (i = 0; i < vi->rss_indir_table_size; ++i)
5460 vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]);
5461 update = true;
5462 }
5463
5464 if (rxfh->key) {
5465 /* If either _F_HASH_REPORT or _F_RSS are negotiated, the
5466 * device provides hash calculation capabilities, that is,
5467 * hash_key is configured.
5468 */
5469 if (!vi->has_rss && !vi->has_rss_hash_report)
5470 return -EOPNOTSUPP;
5471
5472 memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size);
5473 update = true;
5474 }
5475
5476 if (update)
5477 virtnet_commit_rss_command(vi);
5478
5479 return 0;
5480 }
5481
5482 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
5483 {
5484 struct virtnet_info *vi = netdev_priv(dev);
5485 int rc = 0;
5486
5487 switch (info->cmd) {
5488 case ETHTOOL_GRXRINGS:
5489 info->data = vi->curr_queue_pairs;
5490 break;
5491 case ETHTOOL_GRXFH:
5492 virtnet_get_hashflow(vi, info);
5493 break;
5494 default:
5495 rc = -EOPNOTSUPP;
5496 }
5497
5498 return rc;
5499 }
5500
5501 static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
5502 {
5503 struct virtnet_info *vi = netdev_priv(dev);
5504 int rc = 0;
5505
5506 switch (info->cmd) {
5507 case ETHTOOL_SRXFH:
5508 if (!virtnet_set_hashflow(vi, info))
5509 rc = -EINVAL;
5510
5511 break;
5512 default:
5513 rc = -EOPNOTSUPP;
5514 }
5515
5516 return rc;
5517 }
5518
5519 static const struct ethtool_ops virtnet_ethtool_ops = {
5520 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
5521 ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
5522 .get_drvinfo = virtnet_get_drvinfo,
5523 .get_link = ethtool_op_get_link,
5524 .get_ringparam = virtnet_get_ringparam,
5525 .set_ringparam = virtnet_set_ringparam,
5526 .get_strings = virtnet_get_strings,
5527 .get_sset_count = virtnet_get_sset_count,
5528 .get_ethtool_stats = virtnet_get_ethtool_stats,
5529 .set_channels = virtnet_set_channels,
5530 .get_channels = virtnet_get_channels,
5531 .get_ts_info = ethtool_op_get_ts_info,
5532 .get_link_ksettings = virtnet_get_link_ksettings,
5533 .set_link_ksettings = virtnet_set_link_ksettings,
5534 .set_coalesce = virtnet_set_coalesce,
5535 .get_coalesce = virtnet_get_coalesce,
5536 .set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
5537 .get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
5538 .get_rxfh_key_size = virtnet_get_rxfh_key_size,
5539 .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
5540 .get_rxfh = virtnet_get_rxfh,
5541 .set_rxfh = virtnet_set_rxfh,
5542 .get_rxnfc = virtnet_get_rxnfc,
5543 .set_rxnfc = virtnet_set_rxnfc,
5544 };
5545
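/* Per-queue stats callbacks. Device virtqueues are laid out as rx0, tx0,
 * rx1, tx1, ..., so rx queue i maps to vq index i * 2 and tx queue i to
 * i * 2 + 1 when fetching hardware stats.
 */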
5546 static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
5547 struct netdev_queue_stats_rx *stats)
5548 {
5549 struct virtnet_info *vi = netdev_priv(dev);
5550 struct receive_queue *rq = &vi->rq[i];
5551 struct virtnet_stats_ctx ctx = {0};
5552
5553 virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
5554
5555 virtnet_get_hw_stats(vi, &ctx, i * 2);
5556 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0);
5557 }
5558
5559 static void virtnet_get_queue_stats_tx(struct net_device *dev, int i,
5560 struct netdev_queue_stats_tx *stats)
5561 {
5562 struct virtnet_info *vi = netdev_priv(dev);
5563 struct send_queue *sq = &vi->sq[i];
5564 struct virtnet_stats_ctx ctx = {0};
5565
5566 virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
5567
5568 virtnet_get_hw_stats(vi, &ctx, i * 2 + 1);
5569 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0);
5570 }
5571
5572 static void virtnet_get_base_stats(struct net_device *dev,
5573 struct netdev_queue_stats_rx *rx,
5574 struct netdev_queue_stats_tx *tx)
5575 {
5576 struct virtnet_info *vi = netdev_priv(dev);
5577
5578 /* The queue stats of virtio-net are never reset, so all base stats
5579 * reported here are zero.
5580 */
5581 rx->bytes = 0;
5582 rx->packets = 0;
5583
5584 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
5585 rx->hw_drops = 0;
5586 rx->hw_drop_overruns = 0;
5587 }
5588
5589 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
5590 rx->csum_unnecessary = 0;
5591 rx->csum_none = 0;
5592 rx->csum_bad = 0;
5593 }
5594
5595 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
5596 rx->hw_gro_packets = 0;
5597 rx->hw_gro_bytes = 0;
5598 rx->hw_gro_wire_packets = 0;
5599 rx->hw_gro_wire_bytes = 0;
5600 }
5601
5602 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED)
5603 rx->hw_drop_ratelimits = 0;
5604
5605 tx->bytes = 0;
5606 tx->packets = 0;
5607 tx->stop = 0;
5608 tx->wake = 0;
5609
5610 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
5611 tx->hw_drops = 0;
5612 tx->hw_drop_errors = 0;
5613 }
5614
5615 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
5616 tx->csum_none = 0;
5617 tx->needs_csum = 0;
5618 }
5619
5620 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
5621 tx->hw_gso_packets = 0;
5622 tx->hw_gso_bytes = 0;
5623 tx->hw_gso_wire_packets = 0;
5624 tx->hw_gso_wire_bytes = 0;
5625 }
5626
5627 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
5628 tx->hw_drop_ratelimits = 0;
5629 }
5630
5631 static const struct netdev_stat_ops virtnet_stat_ops = {
5632 .get_queue_stats_rx = virtnet_get_queue_stats_rx,
5633 .get_queue_stats_tx = virtnet_get_queue_stats_tx,
5634 .get_base_stats = virtnet_get_base_stats,
5635 };
5636
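/* Quiesce the device for suspend or reset: flush the config and rx-mode
 * workers, detach the netdev so the stack stops transmitting, and close
 * the queues if the interface was running.
 */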
5637 static void virtnet_freeze_down(struct virtio_device *vdev)
5638 {
5639 struct virtnet_info *vi = vdev->priv;
5640
5641 /* Make sure no work handler is accessing the device */
5642 flush_work(&vi->config_work);
5643 disable_rx_mode_work(vi);
5644 flush_work(&vi->rx_mode_work);
5645
5646 netif_tx_lock_bh(vi->dev);
5647 netif_device_detach(vi->dev);
5648 netif_tx_unlock_bh(vi->dev);
5649 if (netif_running(vi->dev)) {
5650 rtnl_lock();
5651 virtnet_close(vi->dev);
5652 rtnl_unlock();
5653 }
5654 }
5655
5656 static int init_vqs(struct virtnet_info *vi);
5657
5658 static int virtnet_restore_up(struct virtio_device *vdev)
5659 {
5660 struct virtnet_info *vi = vdev->priv;
5661 int err;
5662
5663 err = init_vqs(vi);
5664 if (err)
5665 return err;
5666
5667 virtio_device_ready(vdev);
5668
5669 enable_delayed_refill(vi);
5670 enable_rx_mode_work(vi);
5671
5672 if (netif_running(vi->dev)) {
5673 rtnl_lock();
5674 err = virtnet_open(vi->dev);
5675 rtnl_unlock();
5676 if (err)
5677 return err;
5678 }
5679
5680 netif_tx_lock_bh(vi->dev);
5681 netif_device_attach(vi->dev);
5682 netif_tx_unlock_bh(vi->dev);
5683 return err;
5684 }
5685
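/* Program the guest offload bitmap via the control virtqueue; the value is
 * passed in a heap-allocated buffer so it can be used in a scatterlist.
 */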
5686 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
5687 {
5688 __virtio64 *_offloads __free(kfree) = NULL;
5689 struct scatterlist sg;
5690
5691 _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
5692 if (!_offloads)
5693 return -ENOMEM;
5694
5695 *_offloads = cpu_to_virtio64(vi->vdev, offloads);
5696
5697 sg_init_one(&sg, _offloads, sizeof(*_offloads));
5698
5699 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
5700 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
5701 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
5702 return -EINVAL;
5703 }
5704
5705 return 0;
5706 }
5707
5708 static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
5709 {
5710 u64 offloads = 0;
5711
5712 if (!vi->guest_offloads)
5713 return 0;
5714
5715 return virtnet_set_guest_offloads(vi, offloads);
5716 }
5717
5718 static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
5719 {
5720 u64 offloads = vi->guest_offloads;
5721
5722 if (!vi->guest_offloads)
5723 return 0;
5724
5725 return virtnet_set_guest_offloads(vi, offloads);
5726 }
5727
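/* Bind (pool != NULL) or unbind (pool == NULL) an AF_XDP buffer pool to an
 * rx queue. The rx virtqueue is reset while its NAPI is paused so that any
 * buffers from the old source are released before refilling.
 */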
5728 static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
5729 struct xsk_buff_pool *pool)
5730 {
5731 int err, qindex;
5732
5733 qindex = rq - vi->rq;
5734
5735 if (pool) {
5736 err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
5737 if (err < 0)
5738 return err;
5739
5740 err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
5741 MEM_TYPE_XSK_BUFF_POOL, NULL);
5742 if (err < 0)
5743 goto unreg;
5744
5745 xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
5746 }
5747
5748 virtnet_rx_pause(vi, rq);
5749
5750 err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL);
5751 if (err) {
5752 netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
5753
5754 pool = NULL;
5755 }
5756
5757 rq->xsk_pool = pool;
5758
5759 virtnet_rx_resume(vi, rq);
5760
5761 if (pool)
5762 return 0;
5763
5764 unreg:
5765 xdp_rxq_info_unreg(&rq->xsk_rxq_info);
5766 return err;
5767 }
5768
5769 static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
5770 struct send_queue *sq,
5771 struct xsk_buff_pool *pool)
5772 {
5773 int err, qindex;
5774
5775 qindex = sq - vi->sq;
5776
5777 virtnet_tx_pause(vi, sq);
5778
5779 err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf,
5780 virtnet_sq_free_unused_buf_done);
5781 if (err) {
5782 netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
5783 pool = NULL;
5784 }
5785
5786 sq->xsk_pool = pool;
5787
5788 virtnet_tx_resume(vi, sq);
5789
5790 return err;
5791 }
5792
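/* Enable AF_XDP zerocopy on queue pair @qid: validate headroom and queue
 * mode, map the shared virtio-net header for tx, DMA-map the pool, then
 * bind it to the rx and tx queues, unwinding in reverse order on failure.
 */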
5793 static int virtnet_xsk_pool_enable(struct net_device *dev,
5794 struct xsk_buff_pool *pool,
5795 u16 qid)
5796 {
5797 struct virtnet_info *vi = netdev_priv(dev);
5798 struct receive_queue *rq;
5799 struct device *dma_dev;
5800 struct send_queue *sq;
5801 dma_addr_t hdr_dma;
5802 int err, size;
5803
5804 if (vi->hdr_len > xsk_pool_get_headroom(pool))
5805 return -EINVAL;
5806
5807 /* In big_packets mode, XDP cannot work, so there is no need to set up
5808 * xsk for the rq.
5809 */
5810 if (vi->big_packets && !vi->mergeable_rx_bufs)
5811 return -ENOENT;
5812
5813 if (qid >= vi->curr_queue_pairs)
5814 return -EINVAL;
5815
5816 sq = &vi->sq[qid];
5817 rq = &vi->rq[qid];
5818
5819 /* xsk assumes that tx and rx share the same DMA device: AF_XDP may use
5820 * one buffer to receive on the rx side and then reuse it to transmit on
5821 * the tx side, so the DMA devices of sq and rq must be the same.
5822 *
5823 * vq->dma_dev, however, allows each vq to have its own DMA device, so
5824 * check that the rq and sq virtqueues resolve to the same device.
5825 */
5826 if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
5827 return -EINVAL;
5828
5829 dma_dev = virtqueue_dma_dev(rq->vq);
5830 if (!dma_dev)
5831 return -EINVAL;
5832
5833 size = virtqueue_get_vring_size(rq->vq);
5834
5835 rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL);
5836 if (!rq->xsk_buffs)
5837 return -ENOMEM;
5838
5839 hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
5840 DMA_TO_DEVICE, 0);
5841 if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
5842 return -ENOMEM;
5843
5844 err = xsk_pool_dma_map(pool, dma_dev, 0);
5845 if (err)
5846 goto err_xsk_map;
5847
5848 err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
5849 if (err)
5850 goto err_rq;
5851
5852 err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
5853 if (err)
5854 goto err_sq;
5855
5856 /* For now we do not support tx offloads (such as tx csum), so the tx
5857 * virtnet hdr is always zero and all tx packets can share a single hdr.
5858 */
5859 sq->xsk_hdr_dma_addr = hdr_dma;
5860
5861 return 0;
5862
5863 err_sq:
5864 virtnet_rq_bind_xsk_pool(vi, rq, NULL);
5865 err_rq:
5866 xsk_pool_dma_unmap(pool, 0);
5867 err_xsk_map:
5868 virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
5869 DMA_TO_DEVICE, 0);
5870 return err;
5871 }
5872
5873 static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
5874 {
5875 struct virtnet_info *vi = netdev_priv(dev);
5876 struct xsk_buff_pool *pool;
5877 struct receive_queue *rq;
5878 struct send_queue *sq;
5879 int err;
5880
5881 if (qid >= vi->curr_queue_pairs)
5882 return -EINVAL;
5883
5884 sq = &vi->sq[qid];
5885 rq = &vi->rq[qid];
5886
5887 pool = rq->xsk_pool;
5888
5889 err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
5890 err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
5891
5892 xsk_pool_dma_unmap(pool, 0);
5893
5894 virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
5895 vi->hdr_len, DMA_TO_DEVICE, 0);
5896 kvfree(rq->xsk_buffs);
5897
5898 return err;
5899 }
5900
5901 static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
5902 {
5903 if (xdp->xsk.pool)
5904 return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
5905 xdp->xsk.queue_id);
5906 else
5907 return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
5908 }
5909
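/* Attach or detach an XDP program. Guest offloads (GRO_HW/CSUM) are
 * cleared while a program is loaded and restored on detach. Extra tx
 * queues (one per CPU) are requested for XDP_TX; if the device cannot
 * provide them, XDP_TX/XDP_REDIRECT fall back to a slower, locked tx mode.
 */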
5910 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
5911 struct netlink_ext_ack *extack)
5912 {
5913 unsigned int room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
5914 sizeof(struct skb_shared_info));
5915 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
5916 struct virtnet_info *vi = netdev_priv(dev);
5917 struct bpf_prog *old_prog;
5918 u16 xdp_qp = 0, curr_qp;
5919 int i, err;
5920
5921 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
5922 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
5923 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
5924 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
5925 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
5926 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
5927 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
5928 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
5929 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
5930 return -EOPNOTSUPP;
5931 }
5932
5933 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
5934 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
5935 return -EINVAL;
5936 }
5937
5938 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
5939 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
5940 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
5941 return -EINVAL;
5942 }
5943
5944 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
5945 if (prog)
5946 xdp_qp = nr_cpu_ids;
5947
5948 /* XDP requires extra queues for XDP_TX */
5949 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
5950 netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
5951 curr_qp + xdp_qp, vi->max_queue_pairs);
5952 xdp_qp = 0;
5953 }
5954
5955 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
5956 if (!prog && !old_prog)
5957 return 0;
5958
5959 if (prog)
5960 bpf_prog_add(prog, vi->max_queue_pairs - 1);
5961
5962 /* Make sure NAPI is not using any XDP TX queues for RX. */
5963 if (netif_running(dev)) {
5964 for (i = 0; i < vi->max_queue_pairs; i++) {
5965 virtnet_napi_disable(&vi->rq[i]);
5966 virtnet_napi_tx_disable(&vi->sq[i]);
5967 }
5968 }
5969
5970 if (!prog) {
5971 for (i = 0; i < vi->max_queue_pairs; i++) {
5972 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
5973 if (i == 0)
5974 virtnet_restore_guest_offloads(vi);
5975 }
5976 synchronize_net();
5977 }
5978
5979 err = virtnet_set_queues(vi, curr_qp + xdp_qp);
5980 if (err)
5981 goto err;
5982 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
5983 vi->xdp_queue_pairs = xdp_qp;
5984
5985 if (prog) {
5986 vi->xdp_enabled = true;
5987 for (i = 0; i < vi->max_queue_pairs; i++) {
5988 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
5989 if (i == 0 && !old_prog)
5990 virtnet_clear_guest_offloads(vi);
5991 }
5992 if (!old_prog)
5993 xdp_features_set_redirect_target(dev, true);
5994 } else {
5995 xdp_features_clear_redirect_target(dev);
5996 vi->xdp_enabled = false;
5997 }
5998
5999 for (i = 0; i < vi->max_queue_pairs; i++) {
6000 if (old_prog)
6001 bpf_prog_put(old_prog);
6002 if (netif_running(dev)) {
6003 virtnet_napi_enable(&vi->rq[i]);
6004 virtnet_napi_tx_enable(&vi->sq[i]);
6005 }
6006 }
6007
6008 return 0;
6009
6010 err:
6011 if (!prog) {
6012 virtnet_clear_guest_offloads(vi);
6013 for (i = 0; i < vi->max_queue_pairs; i++)
6014 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
6015 }
6016
6017 if (netif_running(dev)) {
6018 for (i = 0; i < vi->max_queue_pairs; i++) {
6019 virtnet_napi_enable(&vi->rq[i]);
6020 virtnet_napi_tx_enable(&vi->sq[i]);
6021 }
6022 }
6023 if (prog)
6024 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
6025 return err;
6026 }
6027
6028 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
6029 {
6030 switch (xdp->command) {
6031 case XDP_SETUP_PROG:
6032 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
6033 case XDP_SETUP_XSK_POOL:
6034 return virtnet_xsk_pool_setup(dev, xdp);
6035 default:
6036 return -EINVAL;
6037 }
6038 }
6039
6040 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
6041 size_t len)
6042 {
6043 struct virtnet_info *vi = netdev_priv(dev);
6044 int ret;
6045
6046 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
6047 return -EOPNOTSUPP;
6048
6049 ret = snprintf(buf, len, "sby");
6050 if (ret >= len)
6051 return -EOPNOTSUPP;
6052
6053 return 0;
6054 }
6055
6056 static int virtnet_set_features(struct net_device *dev,
6057 netdev_features_t features)
6058 {
6059 struct virtnet_info *vi = netdev_priv(dev);
6060 u64 offloads;
6061 int err;
6062
6063 if ((dev->features ^ features) & NETIF_F_GRO_HW) {
6064 if (vi->xdp_enabled)
6065 return -EBUSY;
6066
6067 if (features & NETIF_F_GRO_HW)
6068 offloads = vi->guest_offloads_capable;
6069 else
6070 offloads = vi->guest_offloads_capable &
6071 ~GUEST_OFFLOAD_GRO_HW_MASK;
6072
6073 err = virtnet_set_guest_offloads(vi, offloads);
6074 if (err)
6075 return err;
6076 vi->guest_offloads = offloads;
6077 }
6078
6079 if ((dev->features ^ features) & NETIF_F_RXHASH) {
6080 if (features & NETIF_F_RXHASH)
6081 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
6082 else
6083 vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE);
6084
6085 if (!virtnet_commit_rss_command(vi))
6086 return -EINVAL;
6087 }
6088
6089 return 0;
6090 }
6091
6092 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
6093 {
6094 struct virtnet_info *priv = netdev_priv(dev);
6095 struct send_queue *sq = &priv->sq[txqueue];
6096 struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
6097
6098 u64_stats_update_begin(&sq->stats.syncp);
6099 u64_stats_inc(&sq->stats.tx_timeouts);
6100 u64_stats_update_end(&sq->stats.syncp);
6101
6102 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
6103 txqueue, sq->name, sq->vq->index, sq->vq->name,
6104 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
6105 }
6106
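/* Set up dynamic interrupt moderation (DIM) for the rx queues; the DIM
 * worker adjusts both the usecs and packet coalescing limits based on
 * observed load.
 */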
6107 static int virtnet_init_irq_moder(struct virtnet_info *vi)
6108 {
6109 u8 profile_flags = 0, coal_flags = 0;
6110 int ret, i;
6111
6112 profile_flags |= DIM_PROFILE_RX;
6113 coal_flags |= DIM_COALESCE_USEC | DIM_COALESCE_PKTS;
6114 ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags,
6115 DIM_CQ_PERIOD_MODE_START_FROM_EQE,
6116 0, virtnet_rx_dim_work, NULL);
6117
6118 if (ret)
6119 return ret;
6120
6121 for (i = 0; i < vi->max_queue_pairs; i++)
6122 net_dim_setting(vi->dev, &vi->rq[i].dim, false);
6123
6124 return 0;
6125 }
6126
6127 static void virtnet_free_irq_moder(struct virtnet_info *vi)
6128 {
6129 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
6130 return;
6131
6132 rtnl_lock();
6133 net_dim_free_irq_moder(vi->dev);
6134 rtnl_unlock();
6135 }
6136
6137 static const struct net_device_ops virtnet_netdev = {
6138 .ndo_open = virtnet_open,
6139 .ndo_stop = virtnet_close,
6140 .ndo_start_xmit = start_xmit,
6141 .ndo_validate_addr = eth_validate_addr,
6142 .ndo_set_mac_address = virtnet_set_mac_address,
6143 .ndo_set_rx_mode = virtnet_set_rx_mode,
6144 .ndo_get_stats64 = virtnet_stats,
6145 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
6146 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
6147 .ndo_bpf = virtnet_xdp,
6148 .ndo_xdp_xmit = virtnet_xdp_xmit,
6149 .ndo_xsk_wakeup = virtnet_xsk_wakeup,
6150 .ndo_features_check = passthru_features_check,
6151 .ndo_get_phys_port_name = virtnet_get_phys_port_name,
6152 .ndo_set_features = virtnet_set_features,
6153 .ndo_tx_timeout = virtnet_tx_timeout,
6154 };
6155
6156 static void virtnet_config_changed_work(struct work_struct *work)
6157 {
6158 struct virtnet_info *vi =
6159 container_of(work, struct virtnet_info, config_work);
6160 u16 v;
6161
6162 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
6163 struct virtio_net_config, status, &v) < 0)
6164 return;
6165
6166 if (v & VIRTIO_NET_S_ANNOUNCE) {
6167 netdev_notify_peers(vi->dev);
6168 virtnet_ack_link_announce(vi);
6169 }
6170
6171 /* Ignore unknown (future) status bits */
6172 v &= VIRTIO_NET_S_LINK_UP;
6173
6174 if (vi->status == v)
6175 return;
6176
6177 vi->status = v;
6178
6179 if (vi->status & VIRTIO_NET_S_LINK_UP) {
6180 virtnet_update_settings(vi);
6181 netif_carrier_on(vi->dev);
6182 netif_tx_wake_all_queues(vi->dev);
6183 } else {
6184 netif_carrier_off(vi->dev);
6185 netif_tx_stop_all_queues(vi->dev);
6186 }
6187 }
6188
6189 static void virtnet_config_changed(struct virtio_device *vdev)
6190 {
6191 struct virtnet_info *vi = vdev->priv;
6192
6193 schedule_work(&vi->config_work);
6194 }
6195
6196 static void virtnet_free_queues(struct virtnet_info *vi)
6197 {
6198 int i;
6199
6200 for (i = 0; i < vi->max_queue_pairs; i++) {
6201 __netif_napi_del(&vi->rq[i].napi);
6202 __netif_napi_del(&vi->sq[i].napi);
6203 }
6204
6205 /* We called __netif_napi_del(), so
6206 * we must respect an RCU grace period before freeing vi->rq.
6207 */
6208 synchronize_net();
6209
6210 kfree(vi->rq);
6211 kfree(vi->sq);
6212 kfree(vi->ctrl);
6213 }
6214
6215 static void _free_receive_bufs(struct virtnet_info *vi)
6216 {
6217 struct bpf_prog *old_prog;
6218 int i;
6219
6220 for (i = 0; i < vi->max_queue_pairs; i++) {
6221 while (vi->rq[i].pages)
6222 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
6223
6224 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
6225 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
6226 if (old_prog)
6227 bpf_prog_put(old_prog);
6228 }
6229 }
6230
6231 static void free_receive_bufs(struct virtnet_info *vi)
6232 {
6233 rtnl_lock();
6234 _free_receive_bufs(vi);
6235 rtnl_unlock();
6236 }
6237
6238 static void free_receive_page_frags(struct virtnet_info *vi)
6239 {
6240 int i;
6241 for (i = 0; i < vi->max_queue_pairs; i++)
6242 if (vi->rq[i].alloc_frag.page) {
6243 if (vi->rq[i].last_dma)
6244 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
6245 put_page(vi->rq[i].alloc_frag.page);
6246 }
6247 }
6248
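/* Free a buffer left on a tx virtqueue; the pointer carries a type tag so
 * skbs, XDP frames and XSK buffers can each be released appropriately.
 */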
6249 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
6250 {
6251 struct virtnet_info *vi = vq->vdev->priv;
6252 struct send_queue *sq;
6253 int i = vq2txq(vq);
6254
6255 sq = &vi->sq[i];
6256
6257 switch (virtnet_xmit_ptr_unpack(&buf)) {
6258 case VIRTNET_XMIT_TYPE_SKB:
6259 case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
6260 dev_kfree_skb(buf);
6261 break;
6262
6263 case VIRTNET_XMIT_TYPE_XDP:
6264 xdp_return_frame(buf);
6265 break;
6266
6267 case VIRTNET_XMIT_TYPE_XSK:
6268 xsk_tx_completed(sq->xsk_pool, 1);
6269 break;
6270 }
6271 }
6272
6273 static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq)
6274 {
6275 struct virtnet_info *vi = vq->vdev->priv;
6276 int i = vq2txq(vq);
6277
6278 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
6279 }
6280
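/* Drain and free any buffers still queued on the tx and rx virtqueues,
 * e.g. after the device has been reset in remove_vq_common().
 */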
6281 static void free_unused_bufs(struct virtnet_info *vi)
6282 {
6283 void *buf;
6284 int i;
6285
6286 for (i = 0; i < vi->max_queue_pairs; i++) {
6287 struct virtqueue *vq = vi->sq[i].vq;
6288 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
6289 virtnet_sq_free_unused_buf(vq, buf);
6290 cond_resched();
6291 }
6292
6293 for (i = 0; i < vi->max_queue_pairs; i++) {
6294 struct virtqueue *vq = vi->rq[i].vq;
6295
6296 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
6297 virtnet_rq_unmap_free_buf(vq, buf);
6298 cond_resched();
6299 }
6300 }
6301
6302 static void virtnet_del_vqs(struct virtnet_info *vi)
6303 {
6304 struct virtio_device *vdev = vi->vdev;
6305
6306 virtnet_clean_affinity(vi);
6307
6308 vdev->config->del_vqs(vdev);
6309
6310 virtnet_free_queues(vi);
6311 }
6312
6313 /* How large should a single buffer be so a queue full of these can fit at
6314 * least one full packet?
6315 * Logic below assumes the mergeable buffer header is used.
6316 */
6317 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
6318 {
6319 const unsigned int hdr_len = vi->hdr_len;
6320 unsigned int rq_size = virtqueue_get_vring_size(vq);
6321 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
6322 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
6323 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
6324
6325 return max(max(min_buf_len, hdr_len) - hdr_len,
6326 (unsigned int)GOOD_PACKET_LEN);
6327 }
6328
6329 static int virtnet_find_vqs(struct virtnet_info *vi)
6330 {
6331 struct virtqueue_info *vqs_info;
6332 struct virtqueue **vqs;
6333 int ret = -ENOMEM;
6334 int total_vqs;
6335 bool *ctx;
6336 u16 i;
6337
6338 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
6339 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
6340 * possible control vq.
6341 */
6342 total_vqs = vi->max_queue_pairs * 2 +
6343 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
6344
6345 /* Allocate space for find_vqs parameters */
6346 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
6347 if (!vqs)
6348 goto err_vq;
6349 vqs_info = kcalloc(total_vqs, sizeof(*vqs_info), GFP_KERNEL);
6350 if (!vqs_info)
6351 goto err_vqs_info;
6352 if (!vi->big_packets || vi->mergeable_rx_bufs) {
6353 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
6354 if (!ctx)
6355 goto err_ctx;
6356 } else {
6357 ctx = NULL;
6358 }
6359
6360 /* Parameters for control virtqueue, if any */
6361 if (vi->has_cvq) {
6362 vqs_info[total_vqs - 1].name = "control";
6363 }
6364
6365 /* Allocate/initialize parameters for send/receive virtqueues */
6366 for (i = 0; i < vi->max_queue_pairs; i++) {
6367 vqs_info[rxq2vq(i)].callback = skb_recv_done;
6368 vqs_info[txq2vq(i)].callback = skb_xmit_done;
6369 sprintf(vi->rq[i].name, "input.%u", i);
6370 sprintf(vi->sq[i].name, "output.%u", i);
6371 vqs_info[rxq2vq(i)].name = vi->rq[i].name;
6372 vqs_info[txq2vq(i)].name = vi->sq[i].name;
6373 if (ctx)
6374 vqs_info[rxq2vq(i)].ctx = true;
6375 }
6376
6377 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL);
6378 if (ret)
6379 goto err_find;
6380
6381 if (vi->has_cvq) {
6382 vi->cvq = vqs[total_vqs - 1];
6383 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
6384 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6385 }
6386
6387 for (i = 0; i < vi->max_queue_pairs; i++) {
6388 vi->rq[i].vq = vqs[rxq2vq(i)];
6389 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
6390 vi->sq[i].vq = vqs[txq2vq(i)];
6391 }
6392
6393 /* run here: ret == 0. */
6394
6395
6396 err_find:
6397 kfree(ctx);
6398 err_ctx:
6399 kfree(vqs_info);
6400 err_vqs_info:
6401 kfree(vqs);
6402 err_vq:
6403 return ret;
6404 }
6405
6406 static int virtnet_alloc_queues(struct virtnet_info *vi)
6407 {
6408 int i;
6409
6410 if (vi->has_cvq) {
6411 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
6412 if (!vi->ctrl)
6413 goto err_ctrl;
6414 } else {
6415 vi->ctrl = NULL;
6416 }
6417 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
6418 if (!vi->sq)
6419 goto err_sq;
6420 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
6421 if (!vi->rq)
6422 goto err_rq;
6423
6424 INIT_DELAYED_WORK(&vi->refill, refill_work);
6425 for (i = 0; i < vi->max_queue_pairs; i++) {
6426 vi->rq[i].pages = NULL;
6427 netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
6428 i);
6429 vi->rq[i].napi.weight = napi_weight;
6430 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
6431 virtnet_poll_tx,
6432 napi_tx ? napi_weight : 0);
6433
6434 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
6435 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
6436 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
6437
6438 u64_stats_init(&vi->rq[i].stats.syncp);
6439 u64_stats_init(&vi->sq[i].stats.syncp);
6440 mutex_init(&vi->rq[i].dim_lock);
6441 }
6442
6443 return 0;
6444
6445 err_rq:
6446 kfree(vi->sq);
6447 err_sq:
6448 kfree(vi->ctrl);
6449 err_ctrl:
6450 return -ENOMEM;
6451 }
6452
6453 static int init_vqs(struct virtnet_info *vi)
6454 {
6455 int ret;
6456
6457 /* Allocate send & receive queues */
6458 ret = virtnet_alloc_queues(vi);
6459 if (ret)
6460 goto err;
6461
6462 ret = virtnet_find_vqs(vi);
6463 if (ret)
6464 goto err_free;
6465
6466 cpus_read_lock();
6467 virtnet_set_affinity(vi);
6468 cpus_read_unlock();
6469
6470 return 0;
6471
6472 err_free:
6473 virtnet_free_queues(vi);
6474 err:
6475 return ret;
6476 }
6477
6478 #ifdef CONFIG_SYSFS
6479 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
6480 char *buf)
6481 {
6482 struct virtnet_info *vi = netdev_priv(queue->dev);
6483 unsigned int queue_index = get_netdev_rx_queue_index(queue);
6484 unsigned int headroom = virtnet_get_headroom(vi);
6485 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
6486 struct ewma_pkt_len *avg;
6487
6488 BUG_ON(queue_index >= vi->max_queue_pairs);
6489 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
6490 return sprintf(buf, "%u\n",
6491 get_mergeable_buf_len(&vi->rq[queue_index], avg,
6492 SKB_DATA_ALIGN(headroom + tailroom)));
6493 }
6494
6495 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
6496 __ATTR_RO(mergeable_rx_buffer_size);
6497
6498 static struct attribute *virtio_net_mrg_rx_attrs[] = {
6499 &mergeable_rx_buffer_size_attribute.attr,
6500 NULL
6501 };
6502
6503 static const struct attribute_group virtio_net_mrg_rx_group = {
6504 .name = "virtio_net",
6505 .attrs = virtio_net_mrg_rx_attrs
6506 };
6507 #endif
6508
6509 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
6510 unsigned int fbit,
6511 const char *fname, const char *dname)
6512 {
6513 if (!virtio_has_feature(vdev, fbit))
6514 return false;
6515
6516 dev_err(&vdev->dev, "device advertises feature %s but not %s",
6517 fname, dname);
6518
6519 return true;
6520 }
6521
6522 #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
6523 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
6524
6525 static bool virtnet_validate_features(struct virtio_device *vdev)
6526 {
6527 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
6528 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
6529 "VIRTIO_NET_F_CTRL_VQ") ||
6530 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
6531 "VIRTIO_NET_F_CTRL_VQ") ||
6532 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
6533 "VIRTIO_NET_F_CTRL_VQ") ||
6534 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
6535 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
6536 "VIRTIO_NET_F_CTRL_VQ") ||
6537 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
6538 "VIRTIO_NET_F_CTRL_VQ") ||
6539 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
6540 "VIRTIO_NET_F_CTRL_VQ") ||
6541 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
6542 "VIRTIO_NET_F_CTRL_VQ") ||
6543 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
6544 "VIRTIO_NET_F_CTRL_VQ"))) {
6545 return false;
6546 }
6547
6548 return true;
6549 }
6550
6551 #define MIN_MTU ETH_MIN_MTU
6552 #define MAX_MTU ETH_MAX_MTU
6553
6554 static int virtnet_validate(struct virtio_device *vdev)
6555 {
6556 if (!vdev->config->get) {
6557 dev_err(&vdev->dev, "%s failure: config access disabled\n",
6558 __func__);
6559 return -EINVAL;
6560 }
6561
6562 if (!virtnet_validate_features(vdev))
6563 return -EINVAL;
6564
6565 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
6566 int mtu = virtio_cread16(vdev,
6567 offsetof(struct virtio_net_config,
6568 mtu));
6569 if (mtu < MIN_MTU)
6570 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
6571 }
6572
6573 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
6574 !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
6575 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
6576 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
6577 }
6578
6579 return 0;
6580 }
6581
6582 static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
6583 {
6584 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
6585 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
6586 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
6587 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
6588 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
6589 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
6590 }
6591
6592 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
6593 {
6594 bool guest_gso = virtnet_check_guest_gso(vi);
6595
6596 /* If the device can receive ANY guest GSO packets, regardless of mtu,
6597 * allocate buffers of maximum size; otherwise limit them to mtu-sized
6598 * buffers only.
6599 */
6600 if (mtu > ETH_DATA_LEN || guest_gso) {
6601 vi->big_packets = true;
6602 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
6603 }
6604 }
6605
6606 #define VIRTIO_NET_HASH_REPORT_MAX_TABLE 10
6607 static enum xdp_rss_hash_type
6608 virtnet_xdp_rss_type[VIRTIO_NET_HASH_REPORT_MAX_TABLE] = {
6609 [VIRTIO_NET_HASH_REPORT_NONE] = XDP_RSS_TYPE_NONE,
6610 [VIRTIO_NET_HASH_REPORT_IPv4] = XDP_RSS_TYPE_L3_IPV4,
6611 [VIRTIO_NET_HASH_REPORT_TCPv4] = XDP_RSS_TYPE_L4_IPV4_TCP,
6612 [VIRTIO_NET_HASH_REPORT_UDPv4] = XDP_RSS_TYPE_L4_IPV4_UDP,
6613 [VIRTIO_NET_HASH_REPORT_IPv6] = XDP_RSS_TYPE_L3_IPV6,
6614 [VIRTIO_NET_HASH_REPORT_TCPv6] = XDP_RSS_TYPE_L4_IPV6_TCP,
6615 [VIRTIO_NET_HASH_REPORT_UDPv6] = XDP_RSS_TYPE_L4_IPV6_UDP,
6616 [VIRTIO_NET_HASH_REPORT_IPv6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
6617 [VIRTIO_NET_HASH_REPORT_TCPv6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
6618 [VIRTIO_NET_HASH_REPORT_UDPv6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX
6619 };
6620
6621 static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
6622 enum xdp_rss_hash_type *rss_type)
6623 {
6624 const struct xdp_buff *xdp = (void *)_ctx;
6625 struct virtio_net_hdr_v1_hash *hdr_hash;
6626 struct virtnet_info *vi;
6627 u16 hash_report;
6628
6629 if (!(xdp->rxq->dev->features & NETIF_F_RXHASH))
6630 return -ENODATA;
6631
6632 vi = netdev_priv(xdp->rxq->dev);
6633 hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len);
6634 hash_report = __le16_to_cpu(hdr_hash->hash_report);
6635
6636 if (hash_report >= VIRTIO_NET_HASH_REPORT_MAX_TABLE)
6637 hash_report = VIRTIO_NET_HASH_REPORT_NONE;
6638
6639 *rss_type = virtnet_xdp_rss_type[hash_report];
6640 *hash = __le32_to_cpu(hdr_hash->hash_value);
6641 return 0;
6642 }
6643
6644 static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
6645 .xmo_rx_hash = virtnet_xdp_rx_hash,
6646 };
6647
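/* Probe: read the queue pair count from config space, allocate the netdev,
 * translate negotiated virtio features into netdev features, set up the
 * virtqueues, then register the device and query optional capabilities
 * (RSS, device stats) over the control virtqueue.
 */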
6648 static int virtnet_probe(struct virtio_device *vdev)
6649 {
6650 int i, err = -ENOMEM;
6651 struct net_device *dev;
6652 struct virtnet_info *vi;
6653 u16 max_queue_pairs;
6654 int mtu = 0;
6655
6656 /* Find if host supports multiqueue/rss virtio_net device */
6657 max_queue_pairs = 1;
6658 if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
6659 max_queue_pairs =
6660 virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
6661
6662 /* We need at least 2 queues */
6663 if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
6664 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
6665 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
6666 max_queue_pairs = 1;
6667
6668 /* Allocate ourselves a network device with room for our info */
6669 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
6670 if (!dev)
6671 return -ENOMEM;
6672
6673 /* Set up network device as normal. */
6674 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
6675 IFF_TX_SKB_NO_LINEAR;
6676 dev->netdev_ops = &virtnet_netdev;
6677 dev->stat_ops = &virtnet_stat_ops;
6678 dev->features = NETIF_F_HIGHDMA;
6679
6680 dev->ethtool_ops = &virtnet_ethtool_ops;
6681 SET_NETDEV_DEV(dev, &vdev->dev);
6682
6683 /* Do we support "hardware" checksums? */
6684 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
6685 /* This opens up the world of extra features. */
6686 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6687 if (csum)
6688 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6689
6690 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
6691 dev->hw_features |= NETIF_F_TSO
6692 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
6693 }
6694 /* Individual feature bits: what can host handle? */
6695 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
6696 dev->hw_features |= NETIF_F_TSO;
6697 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
6698 dev->hw_features |= NETIF_F_TSO6;
6699 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
6700 dev->hw_features |= NETIF_F_TSO_ECN;
6701 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
6702 dev->hw_features |= NETIF_F_GSO_UDP_L4;
6703
6704 dev->features |= NETIF_F_GSO_ROBUST;
6705
6706 if (gso)
6707 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
6708 /* (!csum && gso) case will be fixed by register_netdev() */
6709 }
6710
6711 /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
6712 * need to calculate checksums for partially checksummed packets,
6713 * as they're considered valid by the upper layer.
6714 * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
6715 * receives fully checksummed packets. The device may assist in
6716 * validating these packets' checksums, so the driver won't have to.
6717 */
6718 dev->features |= NETIF_F_RXCSUM;
6719
6720 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
6721 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
6722 dev->features |= NETIF_F_GRO_HW;
6723 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
6724 dev->hw_features |= NETIF_F_GRO_HW;
6725
6726 dev->vlan_features = dev->features;
6727 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
6728 NETDEV_XDP_ACT_XSK_ZEROCOPY;
6729
6730 /* MTU range: 68 - 65535 */
6731 dev->min_mtu = MIN_MTU;
6732 dev->max_mtu = MAX_MTU;
6733
6734 /* Configuration may specify what MAC to use. Otherwise random. */
6735 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
6736 u8 addr[ETH_ALEN];
6737
6738 virtio_cread_bytes(vdev,
6739 offsetof(struct virtio_net_config, mac),
6740 addr, ETH_ALEN);
6741 eth_hw_addr_set(dev, addr);
6742 } else {
6743 eth_hw_addr_random(dev);
6744 dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
6745 dev->dev_addr);
6746 }
6747
6748 /* Set up our device-specific information */
6749 vi = netdev_priv(dev);
6750 vi->dev = dev;
6751 vi->vdev = vdev;
6752 vdev->priv = vi;
6753
6754 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
6755 INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
6756 spin_lock_init(&vi->refill_lock);
6757
6758 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
6759 vi->mergeable_rx_bufs = true;
6760 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
6761 }
6762
6763 if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
6764 vi->has_rss_hash_report = true;
6765
6766 if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
6767 vi->has_rss = true;
6768
6769 vi->rss_indir_table_size =
6770 virtio_cread16(vdev, offsetof(struct virtio_net_config,
6771 rss_max_indirection_table_length));
6772 }
6773 vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
6774 if (!vi->rss_hdr) {
6775 err = -ENOMEM;
6776 goto free;
6777 }
6778
6779 if (vi->has_rss || vi->has_rss_hash_report) {
6780 vi->rss_key_size =
6781 virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
6782 if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
6783 dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n",
6784 vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE);
6785 err = -EINVAL;
6786 goto free;
6787 }
6788
6789 vi->rss_hash_types_supported =
6790 virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
6791 vi->rss_hash_types_supported &=
6792 ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
6793 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
6794 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
6795
6796 dev->hw_features |= NETIF_F_RXHASH;
6797 dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops;
6798 }
6799
6800 if (vi->has_rss_hash_report)
6801 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
6802 else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
6803 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
6804 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
6805 else
6806 vi->hdr_len = sizeof(struct virtio_net_hdr);
6807
6808 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
6809 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
6810 vi->any_header_sg = true;
6811
6812 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
6813 vi->has_cvq = true;
6814
6815 mutex_init(&vi->cvq_lock);
6816
6817 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
6818 mtu = virtio_cread16(vdev,
6819 offsetof(struct virtio_net_config,
6820 mtu));
6821 if (mtu < dev->min_mtu) {
6822 /* Should never trigger: MTU was previously validated
6823 * in virtnet_validate.
6824 */
6825 dev_err(&vdev->dev,
6826 "device MTU appears to have changed it is now %d < %d",
6827 mtu, dev->min_mtu);
6828 err = -EINVAL;
6829 goto free;
6830 }
6831
6832 dev->mtu = mtu;
6833 dev->max_mtu = mtu;
6834 }
6835
6836 virtnet_set_big_packets(vi, mtu);
6837
6838 if (vi->any_header_sg)
6839 dev->needed_headroom = vi->hdr_len;
6840
6841 /* Enable multiqueue by default */
6842 if (num_online_cpus() >= max_queue_pairs)
6843 vi->curr_queue_pairs = max_queue_pairs;
6844 else
6845 vi->curr_queue_pairs = num_online_cpus();
6846 vi->max_queue_pairs = max_queue_pairs;
6847
6848 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
6849 err = init_vqs(vi);
6850 if (err)
6851 goto free;
6852
6853 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
6854 vi->intr_coal_rx.max_usecs = 0;
6855 vi->intr_coal_tx.max_usecs = 0;
6856 vi->intr_coal_rx.max_packets = 0;
6857
6858 /* Keep the default values of the coalescing parameters
6859 * aligned with the default napi_tx state.
6860 */
6861 if (vi->sq[0].napi.weight)
6862 vi->intr_coal_tx.max_packets = 1;
6863 else
6864 vi->intr_coal_tx.max_packets = 0;
6865 }
6866
6867 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
6868 /* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
6869 for (i = 0; i < vi->max_queue_pairs; i++)
6870 if (vi->sq[i].napi.weight)
6871 vi->sq[i].intr_coal.max_packets = 1;
6872
6873 err = virtnet_init_irq_moder(vi);
6874 if (err)
6875 goto free;
6876 }
6877
6878 #ifdef CONFIG_SYSFS
6879 if (vi->mergeable_rx_bufs)
6880 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
6881 #endif
6882 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
6883 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
6884
6885 virtnet_init_settings(dev);
6886
6887 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
6888 vi->failover = net_failover_create(vi->dev);
6889 if (IS_ERR(vi->failover)) {
6890 err = PTR_ERR(vi->failover);
6891 goto free_vqs;
6892 }
6893 }
6894
6895 if (vi->has_rss || vi->has_rss_hash_report)
6896 virtnet_init_default_rss(vi);
6897
6898 enable_rx_mode_work(vi);
6899
6900 /* serialize netdev register + virtio_device_ready() with ndo_open() */
6901 rtnl_lock();
6902
6903 err = register_netdevice(dev);
6904 if (err) {
6905 pr_debug("virtio_net: registering device failed\n");
6906 rtnl_unlock();
6907 goto free_failover;
6908 }
6909
6910 /* Disable config change notification until ndo_open. */
6911 virtio_config_driver_disable(vi->vdev);
6912
6913 virtio_device_ready(vdev);
6914
6915 if (vi->has_rss || vi->has_rss_hash_report) {
6916 if (!virtnet_commit_rss_command(vi)) {
6917 dev_warn(&vdev->dev, "RSS disabled because committing failed.\n");
6918 dev->hw_features &= ~NETIF_F_RXHASH;
6919 vi->has_rss_hash_report = false;
6920 vi->has_rss = false;
6921 }
6922 }
6923
6924 virtnet_set_queues(vi, vi->curr_queue_pairs);
6925
6926 /* A random MAC address has been assigned, notify the device.
6927 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent because
6928 * many devices work fine without being told the MAC explicitly.
6929 */
6930 if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
6931 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
6932 struct scatterlist sg;
6933
6934 sg_init_one(&sg, dev->dev_addr, dev->addr_len);
6935 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
6936 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
6937 pr_debug("virtio_net: setting MAC address failed\n");
6938 rtnl_unlock();
6939 err = -EINVAL;
6940 goto free_unregister_netdev;
6941 }
6942 }
6943
6944 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) {
6945 struct virtio_net_stats_capabilities *stats_cap __free(kfree) = NULL;
6946 struct scatterlist sg;
6947 __le64 v;
6948
6949 stats_cap = kzalloc(sizeof(*stats_cap), GFP_KERNEL);
6950 if (!stats_cap) {
6951 rtnl_unlock();
6952 err = -ENOMEM;
6953 goto free_unregister_netdev;
6954 }
6955
6956 sg_init_one(&sg, stats_cap, sizeof(*stats_cap));
6957
6958 if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
6959 VIRTIO_NET_CTRL_STATS_QUERY,
6960 NULL, &sg)) {
6961 pr_debug("virtio_net: fail to get stats capability\n");
6962 rtnl_unlock();
6963 err = -EINVAL;
6964 goto free_unregister_netdev;
6965 }
6966
6967 v = stats_cap->supported_stats_types[0];
6968 vi->device_stats_cap = le64_to_cpu(v);
6969 }
6970
6971 /* Assume link up if device can't report link status,
6972 otherwise get link status from config. */
6973 netif_carrier_off(dev);
6974 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
6975 virtnet_config_changed_work(&vi->config_work);
6976 } else {
6977 vi->status = VIRTIO_NET_S_LINK_UP;
6978 virtnet_update_settings(vi);
6979 netif_carrier_on(dev);
6980 }
6981
6982 for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
6983 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
6984 set_bit(guest_offloads[i], &vi->guest_offloads);
6985 vi->guest_offloads_capable = vi->guest_offloads;
6986
6987 rtnl_unlock();
6988
6989 err = virtnet_cpu_notif_add(vi);
6990 if (err) {
6991 pr_debug("virtio_net: registering cpu notifier failed\n");
6992 goto free_unregister_netdev;
6993 }
6994
6995 pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
6996 dev->name, max_queue_pairs);
6997
6998 return 0;
6999
7000 free_unregister_netdev:
7001 unregister_netdev(dev);
7002 free_failover:
7003 net_failover_destroy(vi->failover);
7004 free_vqs:
7005 virtio_reset_device(vdev);
7006 cancel_delayed_work_sync(&vi->refill);
7007 free_receive_page_frags(vi);
7008 virtnet_del_vqs(vi);
7009 free:
7010 free_netdev(dev);
7011 return err;
7012 }
7013
7014 static void remove_vq_common(struct virtnet_info *vi)
7015 {
7016 int i;
7017
7018 virtio_reset_device(vi->vdev);
7019
7020 /* Free unused buffers in both send and recv, if any. */
7021 free_unused_bufs(vi);
7022
7023 /*
7024 * Rule of thumb is netdev_tx_reset_queue() should follow any
7025 * skb freeing not followed by netdev_tx_completed_queue()
7026 */
7027 for (i = 0; i < vi->max_queue_pairs; i++)
7028 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
7029
7030 free_receive_bufs(vi);
7031
7032 free_receive_page_frags(vi);
7033
7034 virtnet_del_vqs(vi);
7035 }
7036
7037 static void virtnet_remove(struct virtio_device *vdev)
7038 {
7039 struct virtnet_info *vi = vdev->priv;
7040
7041 virtnet_cpu_notif_remove(vi);
7042
7043 /* Make sure no work handler is accessing the device. */
7044 flush_work(&vi->config_work);
7045 disable_rx_mode_work(vi);
7046 flush_work(&vi->rx_mode_work);
7047
7048 virtnet_free_irq_moder(vi);
7049
7050 unregister_netdev(vi->dev);
7051
7052 net_failover_destroy(vi->failover);
7053
7054 remove_vq_common(vi);
7055
7056 free_netdev(vi->dev);
7057 }
7058
7059 static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
7060 {
7061 struct virtnet_info *vi = vdev->priv;
7062
7063 virtnet_cpu_notif_remove(vi);
7064 virtnet_freeze_down(vdev);
7065 remove_vq_common(vi);
7066
7067 return 0;
7068 }
7069
7070 static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
7071 {
7072 struct virtnet_info *vi = vdev->priv;
7073 int err;
7074
7075 err = virtnet_restore_up(vdev);
7076 if (err)
7077 return err;
7078 virtnet_set_queues(vi, vi->curr_queue_pairs);
7079
7080 err = virtnet_cpu_notif_add(vi);
7081 if (err) {
7082 virtnet_freeze_down(vdev);
7083 remove_vq_common(vi);
7084 return err;
7085 }
7086
7087 return 0;
7088 }
7089
7090 static struct virtio_device_id id_table[] = {
7091 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
7092 { 0 },
7093 };
7094
7095 #define VIRTNET_FEATURES \
7096 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
7097 VIRTIO_NET_F_MAC, \
7098 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
7099 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
7100 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
7101 VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
7102 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
7103 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
7104 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
7105 VIRTIO_NET_F_CTRL_MAC_ADDR, \
7106 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
7107 VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
7108 VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
7109 VIRTIO_NET_F_VQ_NOTF_COAL, \
7110 VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
7111
7112 static unsigned int features[] = {
7113 VIRTNET_FEATURES,
7114 };
7115
7116 static unsigned int features_legacy[] = {
7117 VIRTNET_FEATURES,
7118 VIRTIO_NET_F_GSO,
7119 VIRTIO_F_ANY_LAYOUT,
7120 };
7121
7122 static struct virtio_driver virtio_net_driver = {
7123 .feature_table = features,
7124 .feature_table_size = ARRAY_SIZE(features),
7125 .feature_table_legacy = features_legacy,
7126 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
7127 .driver.name = KBUILD_MODNAME,
7128 .id_table = id_table,
7129 .validate = virtnet_validate,
7130 .probe = virtnet_probe,
7131 .remove = virtnet_remove,
7132 .config_changed = virtnet_config_changed,
7133 #ifdef CONFIG_PM_SLEEP
7134 .freeze = virtnet_freeze,
7135 .restore = virtnet_restore,
7136 #endif
7137 };
7138
7139 static __init int virtio_net_driver_init(void)
7140 {
7141 int ret;
7142
7143 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
7144 virtnet_cpu_online,
7145 virtnet_cpu_down_prep);
7146 if (ret < 0)
7147 goto out;
7148 virtionet_online = ret;
7149 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
7150 NULL, virtnet_cpu_dead);
7151 if (ret)
7152 goto err_dead;
7153 ret = register_virtio_driver(&virtio_net_driver);
7154 if (ret)
7155 goto err_virtio;
7156 return 0;
7157 err_virtio:
7158 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
7159 err_dead:
7160 cpuhp_remove_multi_state(virtionet_online);
7161 out:
7162 return ret;
7163 }
7164 module_init(virtio_net_driver_init);
7165
7166 static __exit void virtio_net_driver_exit(void)
7167 {
7168 unregister_virtio_driver(&virtio_net_driver);
7169 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
7170 cpuhp_remove_multi_state(virtionet_online);
7171 }
7172 module_exit(virtio_net_driver_exit);
7173
7174 MODULE_DEVICE_TABLE(virtio, id_table);
7175 MODULE_DESCRIPTION("Virtio network driver");
7176 MODULE_LICENSE("GPL");
7177