xref: /linux/drivers/net/virtio_net.c (revision 2ed4b46b4fc77749cb0f8dd31a01441b82c8dbaa)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* A network driver using virtio.
3  *
4  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5  */
6 //#define DEBUG
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/module.h>
11 #include <linux/virtio.h>
12 #include <linux/virtio_net.h>
13 #include <linux/bpf.h>
14 #include <linux/bpf_trace.h>
15 #include <linux/scatterlist.h>
16 #include <linux/if_vlan.h>
17 #include <linux/slab.h>
18 #include <linux/cpu.h>
19 #include <linux/average.h>
20 #include <linux/filter.h>
21 #include <linux/kernel.h>
22 #include <linux/dim.h>
23 #include <net/route.h>
24 #include <net/xdp.h>
25 #include <net/net_failover.h>
26 #include <net/netdev_rx_queue.h>
27 #include <net/netdev_queues.h>
28 #include <net/xdp_sock_drv.h>
29 
30 static int napi_weight = NAPI_POLL_WEIGHT;
31 module_param(napi_weight, int, 0444);
32 
33 static bool csum = true, gso = true, napi_tx = true;
34 module_param(csum, bool, 0444);
35 module_param(gso, bool, 0444);
36 module_param(napi_tx, bool, 0644);
37 
38 #define VIRTIO_OFFLOAD_MAP_MIN	46
39 #define VIRTIO_OFFLOAD_MAP_MAX	47
40 #define VIRTIO_FEATURES_MAP_MIN	65
41 #define VIRTIO_O2F_DELTA	(VIRTIO_FEATURES_MAP_MIN - \
42 				 VIRTIO_OFFLOAD_MAP_MIN)
43 
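/* Guest offload bits in [VIRTIO_OFFLOAD_MAP_MIN, VIRTIO_OFFLOAD_MAP_MAX] do
 * not match their feature bit numbers; the helpers below translate such an
 * offload bit to its feature bit by adding VIRTIO_O2F_DELTA (46 -> 65,
 * 47 -> 66). All other offload bits are used as-is.
 */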
44 static bool virtio_is_mapped_offload(unsigned int obit)
45 {
46 	return obit >= VIRTIO_OFFLOAD_MAP_MIN &&
47 	       obit <= VIRTIO_OFFLOAD_MAP_MAX;
48 }
49 
50 static unsigned int virtio_offload_to_feature(unsigned int obit)
51 {
52 	return virtio_is_mapped_offload(obit) ? obit + VIRTIO_O2F_DELTA : obit;
53 }
54 
55 /* FIXME: MTU in config. */
56 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
57 #define GOOD_COPY_LEN	128
58 
59 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
60 
61 /* Separating two types of XDP xmit */
62 #define VIRTIO_XDP_TX		BIT(0)
63 #define VIRTIO_XDP_REDIR	BIT(1)
64 
65 /* RX packet size EWMA. The average packet size is used to determine the packet
66  * buffer size when refilling RX rings. As the entire RX ring may be refilled
67  * at once, the weight is chosen so that the EWMA will be insensitive to short-
68  * term, transient changes in packet size.
69  */
70 DECLARE_EWMA(pkt_len, 0, 64)
71 
72 #define VIRTNET_DRIVER_VERSION "1.0.0"
73 
74 static const unsigned long guest_offloads[] = {
75 	VIRTIO_NET_F_GUEST_TSO4,
76 	VIRTIO_NET_F_GUEST_TSO6,
77 	VIRTIO_NET_F_GUEST_ECN,
78 	VIRTIO_NET_F_GUEST_UFO,
79 	VIRTIO_NET_F_GUEST_CSUM,
80 	VIRTIO_NET_F_GUEST_USO4,
81 	VIRTIO_NET_F_GUEST_USO6,
82 	VIRTIO_NET_F_GUEST_HDRLEN,
83 	VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED,
84 	VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED,
85 };
86 
87 #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
88 			(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
89 			(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
90 			(1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
91 			(1ULL << VIRTIO_NET_F_GUEST_USO4) | \
92 			(1ULL << VIRTIO_NET_F_GUEST_USO6) | \
93 			(1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED) | \
94 			(1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED))
95 
96 struct virtnet_stat_desc {
97 	char desc[ETH_GSTRING_LEN];
98 	size_t offset;
99 	size_t qstat_offset;
100 };
101 
102 struct virtnet_sq_free_stats {
103 	u64 packets;
104 	u64 bytes;
105 	u64 napi_packets;
106 	u64 napi_bytes;
107 	u64 xsk;
108 };
109 
110 struct virtnet_sq_stats {
111 	struct u64_stats_sync syncp;
112 	u64_stats_t packets;
113 	u64_stats_t bytes;
114 	u64_stats_t xdp_tx;
115 	u64_stats_t xdp_tx_drops;
116 	u64_stats_t kicks;
117 	u64_stats_t tx_timeouts;
118 	u64_stats_t stop;
119 	u64_stats_t wake;
120 };
121 
122 struct virtnet_rq_stats {
123 	struct u64_stats_sync syncp;
124 	u64_stats_t packets;
125 	u64_stats_t bytes;
126 	u64_stats_t drops;
127 	u64_stats_t xdp_packets;
128 	u64_stats_t xdp_tx;
129 	u64_stats_t xdp_redirects;
130 	u64_stats_t xdp_drops;
131 	u64_stats_t kicks;
132 };
133 
134 #define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
135 #define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
136 
137 #define VIRTNET_SQ_STAT_QSTAT(name, m)				\
138 	{							\
139 		name,						\
140 		offsetof(struct virtnet_sq_stats, m),		\
141 		offsetof(struct netdev_queue_stats_tx, m),	\
142 	}
143 
144 #define VIRTNET_RQ_STAT_QSTAT(name, m)				\
145 	{							\
146 		name,						\
147 		offsetof(struct virtnet_rq_stats, m),		\
148 		offsetof(struct netdev_queue_stats_rx, m),	\
149 	}
150 
151 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
152 	VIRTNET_SQ_STAT("xdp_tx",       xdp_tx),
153 	VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
154 	VIRTNET_SQ_STAT("kicks",        kicks),
155 	VIRTNET_SQ_STAT("tx_timeouts",  tx_timeouts),
156 };
157 
158 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
159 	VIRTNET_RQ_STAT("drops",         drops),
160 	VIRTNET_RQ_STAT("xdp_packets",   xdp_packets),
161 	VIRTNET_RQ_STAT("xdp_tx",        xdp_tx),
162 	VIRTNET_RQ_STAT("xdp_redirects", xdp_redirects),
163 	VIRTNET_RQ_STAT("xdp_drops",     xdp_drops),
164 	VIRTNET_RQ_STAT("kicks",         kicks),
165 };
166 
167 static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
168 	VIRTNET_SQ_STAT_QSTAT("packets", packets),
169 	VIRTNET_SQ_STAT_QSTAT("bytes",   bytes),
170 	VIRTNET_SQ_STAT_QSTAT("stop",	 stop),
171 	VIRTNET_SQ_STAT_QSTAT("wake",	 wake),
172 };
173 
174 static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
175 	VIRTNET_RQ_STAT_QSTAT("packets", packets),
176 	VIRTNET_RQ_STAT_QSTAT("bytes",   bytes),
177 };
178 
179 #define VIRTNET_STATS_DESC_CQ(name) \
180 	{#name, offsetof(struct virtio_net_stats_cvq, name), -1}
181 
182 #define VIRTNET_STATS_DESC_RX(class, name) \
183 	{#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
184 
185 #define VIRTNET_STATS_DESC_TX(class, name) \
186 	{#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
187 
188 
189 static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
190 	VIRTNET_STATS_DESC_CQ(command_num),
191 	VIRTNET_STATS_DESC_CQ(ok_num),
192 };
193 
194 static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
195 	VIRTNET_STATS_DESC_RX(basic, packets),
196 	VIRTNET_STATS_DESC_RX(basic, bytes),
197 
198 	VIRTNET_STATS_DESC_RX(basic, notifications),
199 	VIRTNET_STATS_DESC_RX(basic, interrupts),
200 };
201 
202 static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
203 	VIRTNET_STATS_DESC_TX(basic, packets),
204 	VIRTNET_STATS_DESC_TX(basic, bytes),
205 
206 	VIRTNET_STATS_DESC_TX(basic, notifications),
207 	VIRTNET_STATS_DESC_TX(basic, interrupts),
208 };
209 
210 static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
211 	VIRTNET_STATS_DESC_RX(csum, needs_csum),
212 };
213 
214 static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
215 	VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
216 	VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
217 };
218 
219 static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
220 	VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
221 };
222 
223 static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
224 	VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
225 };
226 
227 #define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field)			\
228 	{									\
229 		#name,								\
230 		offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name),	\
231 		offsetof(struct netdev_queue_stats_rx, qstat_field),		\
232 	}
233 
234 #define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field)			\
235 	{									\
236 		#name,								\
237 		offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name),	\
238 		offsetof(struct netdev_queue_stats_tx, qstat_field),		\
239 	}
240 
241 static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] = {
242 	VIRTNET_STATS_DESC_RX_QSTAT(basic, drops,         hw_drops),
243 	VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns),
244 };
245 
246 static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] = {
247 	VIRTNET_STATS_DESC_TX_QSTAT(basic, drops,          hw_drops),
248 	VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors),
249 };
250 
251 static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] = {
252 	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
253 	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none,  csum_none),
254 	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad,   csum_bad),
255 };
256 
257 static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] = {
258 	VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none,  csum_none),
259 	VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
260 };
261 
262 static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] = {
263 	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets,           hw_gro_packets),
264 	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes,             hw_gro_bytes),
265 	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
266 	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced,   hw_gro_wire_bytes),
267 };
268 
269 static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] = {
270 	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets,        hw_gso_packets),
271 	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes,          hw_gso_bytes),
272 	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments,       hw_gso_wire_packets),
273 	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
274 };
275 
276 static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] = {
277 	VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
278 };
279 
280 static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
281 	VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
282 };
283 
284 #define VIRTNET_Q_TYPE_RX 0
285 #define VIRTNET_Q_TYPE_TX 1
286 #define VIRTNET_Q_TYPE_CQ 2
287 
288 struct virtnet_interrupt_coalesce {
289 	u32 max_packets;
290 	u32 max_usecs;
291 };
292 
293 /* The dma information of pages allocated at a time. */
294 struct virtnet_rq_dma {
295 	dma_addr_t addr;
296 	u32 ref;
297 	u16 len;
298 	u16 need_sync;
299 };
300 
301 /* Internal representation of a send virtqueue */
302 struct send_queue {
303 	/* Virtqueue associated with this send_queue */
304 	struct virtqueue *vq;
305 
306 	/* TX: fragments + linear part + virtio header */
307 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
308 
309 	/* Name of the send queue: output.$index */
310 	char name[16];
311 
312 	struct virtnet_sq_stats stats;
313 
314 	struct virtnet_interrupt_coalesce intr_coal;
315 
316 	struct napi_struct napi;
317 
318 	/* Record whether sq is in reset state. */
319 	bool reset;
320 
321 	struct xsk_buff_pool *xsk_pool;
322 
323 	dma_addr_t xsk_hdr_dma_addr;
324 };
325 
326 /* Internal representation of a receive virtqueue */
327 struct receive_queue {
328 	/* Virtqueue associated with this receive_queue */
329 	struct virtqueue *vq;
330 
331 	struct napi_struct napi;
332 
333 	struct bpf_prog __rcu *xdp_prog;
334 
335 	struct virtnet_rq_stats stats;
336 
337 	/* The number of rx notifications */
338 	u16 calls;
339 
340 	/* Is dynamic interrupt moderation enabled? */
341 	bool dim_enabled;
342 
343 	/* Used to protect dim_enabled and intr_coal */
344 	struct mutex dim_lock;
345 
346 	/* Dynamic Interrupt Moderation */
347 	struct dim dim;
348 
349 	u32 packets_in_napi;
350 
351 	struct virtnet_interrupt_coalesce intr_coal;
352 
353 	/* Chain pages by the private ptr. */
354 	struct page *pages;
355 
356 	/* Average packet length for mergeable receive buffers. */
357 	struct ewma_pkt_len mrg_avg_pkt_len;
358 
359 	/* Page frag for packet buffer allocation. */
360 	struct page_frag alloc_frag;
361 
362 	/* RX: fragments + linear part + virtio header */
363 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
364 
365 	/* Min single buffer size for mergeable buffers case. */
366 	unsigned int min_buf_len;
367 
368 	/* Name of this receive queue: input.$index */
369 	char name[16];
370 
371 	struct xdp_rxq_info xdp_rxq;
372 
373 	/* Record the last dma info so it can be freed after a new page is allocated. */
374 	struct virtnet_rq_dma *last_dma;
375 
376 	struct xsk_buff_pool *xsk_pool;
377 
378 	/* xdp rxq used by xsk */
379 	struct xdp_rxq_info xsk_rxq_info;
380 
381 	struct xdp_buff **xsk_buffs;
382 };
383 
384 #define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
385 
386 /* Control VQ buffers: protected by the rtnl lock */
387 struct control_buf {
388 	struct virtio_net_ctrl_hdr hdr;
389 	virtio_net_ctrl_ack status;
390 };
391 
392 struct virtnet_info {
393 	struct virtio_device *vdev;
394 	struct virtqueue *cvq;
395 	struct net_device *dev;
396 	struct send_queue *sq;
397 	struct receive_queue *rq;
398 	unsigned int status;
399 
400 	/* Max # of queue pairs supported by the device */
401 	u16 max_queue_pairs;
402 
403 	/* # of queue pairs currently used by the driver */
404 	u16 curr_queue_pairs;
405 
406 	/* # of XDP queue pairs currently used by the driver */
407 	u16 xdp_queue_pairs;
408 
409 	/* xdp_queue_pairs may be 0 even when an XDP program is loaded, so track enablement separately. */
410 	bool xdp_enabled;
411 
412 	/* I like... big packets and I cannot lie! */
413 	bool big_packets;
414 
415 	/* number of sg entries allocated for big packets */
416 	unsigned int big_packets_num_skbfrags;
417 
418 	/* Host will merge rx buffers for big packets (shake it! shake it!) */
419 	bool mergeable_rx_bufs;
420 
421 	/* Host supports rss and/or hash report */
422 	bool has_rss;
423 	bool has_rss_hash_report;
424 	u8 rss_key_size;
425 	u16 rss_indir_table_size;
426 	u32 rss_hash_types_supported;
427 	u32 rss_hash_types_saved;
428 
429 	/* Has control virtqueue */
430 	bool has_cvq;
431 
432 	/* Lock to protect the control VQ */
433 	struct mutex cvq_lock;
434 
435 	/* Host can handle any s/g split between our header and packet data */
436 	bool any_header_sg;
437 
438 	/* Packet virtio header size */
439 	u8 hdr_len;
440 
441 	/* UDP tunnel support */
442 	bool tx_tnl;
443 
444 	bool rx_tnl;
445 
446 	bool rx_tnl_csum;
447 
448 	/* Work struct for config space updates */
449 	struct work_struct config_work;
450 
451 	/* Work struct for setting rx mode */
452 	struct work_struct rx_mode_work;
453 
454 	/* OK to queue work setting RX mode? */
455 	bool rx_mode_work_enabled;
456 
457 	/* Is the affinity hint set for the virtqueues? */
458 	bool affinity_hint_set;
459 
460 	/* CPU hotplug instances for online & dead */
461 	struct hlist_node node;
462 	struct hlist_node node_dead;
463 
464 	struct control_buf *ctrl;
465 
466 	/* Ethtool settings */
467 	u8 duplex;
468 	u32 speed;
469 
470 	/* Is rx dynamic interrupt moderation enabled? */
471 	bool rx_dim_enabled;
472 
473 	/* Interrupt coalescing settings */
474 	struct virtnet_interrupt_coalesce intr_coal_tx;
475 	struct virtnet_interrupt_coalesce intr_coal_rx;
476 
477 	unsigned long guest_offloads;
478 	unsigned long guest_offloads_capable;
479 
480 	/* failover when STANDBY feature enabled */
481 	struct failover *failover;
482 
483 	u64 device_stats_cap;
484 
485 	struct virtio_net_rss_config_hdr *rss_hdr;
486 
487 	/* Must be last as it ends in a flexible-array member. */
488 	TRAILING_OVERLAP(struct virtio_net_rss_config_trailer, rss_trailer, hash_key_data,
489 		u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
490 	);
491 };
492 static_assert(offsetof(struct virtnet_info, rss_trailer.hash_key_data) ==
493 	      offsetof(struct virtnet_info, rss_hash_key_data));
494 
495 struct padded_vnet_hdr {
496 	struct virtio_net_hdr_v1_hash hdr;
497 	/*
498 	 * hdr is in a separate sg buffer, and the data sg buffer shares the same
499 	 * page with this header sg. This padding makes the next sg 16-byte
500 	 * aligned after the header.
501 	 */
502 	char padding[12];
503 };
504 
505 struct virtio_net_common_hdr {
506 	union {
507 		struct virtio_net_hdr hdr;
508 		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
509 		struct virtio_net_hdr_v1_hash hash_v1_hdr;
510 		struct virtio_net_hdr_v1_hash_tunnel tnl_hdr;
511 	};
512 };
513 
514 static struct virtio_net_common_hdr xsk_hdr;
515 
516 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
517 static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
518 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
519 			       struct net_device *dev,
520 			       unsigned int *xdp_xmit,
521 			       struct virtnet_rq_stats *stats);
522 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
523 				 struct sk_buff *skb, u8 flags);
524 static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
525 					       struct sk_buff *curr_skb,
526 					       struct page *page, void *buf,
527 					       int len, int truesize);
528 static void virtnet_xsk_completed(struct send_queue *sq, int num);
529 
530 enum virtnet_xmit_type {
531 	VIRTNET_XMIT_TYPE_SKB,
532 	VIRTNET_XMIT_TYPE_SKB_ORPHAN,
533 	VIRTNET_XMIT_TYPE_XDP,
534 	VIRTNET_XMIT_TYPE_XSK,
535 };
536 
537 static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi)
538 {
539 	u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1;
540 
541 	return struct_size(vi->rss_hdr, indirection_table, indir_table_size);
542 }
543 
544 static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi)
545 {
546 	return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size);
547 }
548 
549 /* We use the last two bits of the pointer to distinguish the xmit type. */
550 #define VIRTNET_XMIT_TYPE_MASK (BIT(0) | BIT(1))
551 
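/* For XSK tx buffers there is no object to point at, so the descriptor length
 * is encoded directly in the tagged pointer, shifted left by
 * VIRTIO_XSK_FLAG_OFFSET to stay clear of the two type bits; see
 * virtnet_xsk_to_ptr() and virtnet_ptr_to_xsk_buff_len().
 */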
552 #define VIRTIO_XSK_FLAG_OFFSET 2
553 
554 static enum virtnet_xmit_type virtnet_xmit_ptr_unpack(void **ptr)
555 {
556 	unsigned long p = (unsigned long)*ptr;
557 
558 	*ptr = (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);
559 
560 	return p & VIRTNET_XMIT_TYPE_MASK;
561 }
562 
563 static void *virtnet_xmit_ptr_pack(void *ptr, enum virtnet_xmit_type type)
564 {
565 	return (void *)((unsigned long)ptr | type);
566 }
567 
568 static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
569 			      enum virtnet_xmit_type type)
570 {
571 	return virtqueue_add_outbuf(sq->vq, sq->sg, num,
572 				    virtnet_xmit_ptr_pack(data, type),
573 				    GFP_ATOMIC);
574 }
575 
576 static u32 virtnet_ptr_to_xsk_buff_len(void *ptr)
577 {
578 	return ((unsigned long)ptr) >> VIRTIO_XSK_FLAG_OFFSET;
579 }
580 
581 static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
582 {
583 	sg_dma_address(sg) = addr;
584 	sg_dma_len(sg) = len;
585 }
586 
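/* Reclaim completed tx buffers from the virtqueue. The xmit type encoded in
 * the low pointer bits selects how each buffer is accounted and released:
 * skbs are consumed, XDP frames are returned, and XSK completions are only
 * counted here (the caller reports them to the xsk pool).
 */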
587 static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
588 			    bool in_napi, struct virtnet_sq_free_stats *stats)
589 {
590 	struct xdp_frame *frame;
591 	struct sk_buff *skb;
592 	unsigned int len;
593 	void *ptr;
594 
595 	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
596 		switch (virtnet_xmit_ptr_unpack(&ptr)) {
597 		case VIRTNET_XMIT_TYPE_SKB:
598 			skb = ptr;
599 
600 			pr_debug("Sent skb %p\n", skb);
601 			stats->napi_packets++;
602 			stats->napi_bytes += skb->len;
603 			napi_consume_skb(skb, in_napi);
604 			break;
605 
606 		case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
607 			skb = ptr;
608 
609 			stats->packets++;
610 			stats->bytes += skb->len;
611 			napi_consume_skb(skb, in_napi);
612 			break;
613 
614 		case VIRTNET_XMIT_TYPE_XDP:
615 			frame = ptr;
616 
617 			stats->packets++;
618 			stats->bytes += xdp_get_frame_len(frame);
619 			xdp_return_frame(frame);
620 			break;
621 
622 		case VIRTNET_XMIT_TYPE_XSK:
623 			stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr);
624 			stats->xsk++;
625 			break;
626 		}
627 	}
628 	netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
629 }
630 
631 static void virtnet_free_old_xmit(struct send_queue *sq,
632 				  struct netdev_queue *txq,
633 				  bool in_napi,
634 				  struct virtnet_sq_free_stats *stats)
635 {
636 	__free_old_xmit(sq, txq, in_napi, stats);
637 
638 	if (stats->xsk)
639 		virtnet_xsk_completed(sq, stats->xsk);
640 }
641 
642 /* Converting between virtqueue no. and kernel tx/rx queue no.
643  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
644  */
645 static int vq2txq(struct virtqueue *vq)
646 {
647 	return (vq->index - 1) / 2;
648 }
649 
650 static int txq2vq(int txq)
651 {
652 	return txq * 2 + 1;
653 }
654 
655 static int vq2rxq(struct virtqueue *vq)
656 {
657 	return vq->index / 2;
658 }
659 
660 static int rxq2vq(int rxq)
661 {
662 	return rxq * 2;
663 }
664 
665 static int vq_type(struct virtnet_info *vi, int qid)
666 {
667 	if (qid == vi->max_queue_pairs * 2)
668 		return VIRTNET_Q_TYPE_CQ;
669 
670 	if (qid % 2)
671 		return VIRTNET_Q_TYPE_TX;
672 
673 	return VIRTNET_Q_TYPE_RX;
674 }
675 
676 static inline struct virtio_net_common_hdr *
677 skb_vnet_common_hdr(struct sk_buff *skb)
678 {
679 	return (struct virtio_net_common_hdr *)skb->cb;
680 }
681 
682 /*
683  * page->private is used to chain pages for big packets; put the whole
684  * most recently used list at the front of rq->pages for reuse
685  */
686 static void give_pages(struct receive_queue *rq, struct page *page)
687 {
688 	struct page *end;
689 
690 	/* Find end of list, sew whole thing into vi->rq.pages. */
691 	for (end = page; end->private; end = (struct page *)end->private);
692 	end->private = (unsigned long)rq->pages;
693 	rq->pages = page;
694 }
695 
696 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
697 {
698 	struct page *p = rq->pages;
699 
700 	if (p) {
701 		rq->pages = (struct page *)p->private;
702 		/* clear private here, it is used to chain pages */
703 		p->private = 0;
704 	} else
705 		p = alloc_page(gfp_mask);
706 	return p;
707 }
708 
709 static void virtnet_rq_free_buf(struct virtnet_info *vi,
710 				struct receive_queue *rq, void *buf)
711 {
712 	if (vi->mergeable_rx_bufs)
713 		put_page(virt_to_head_page(buf));
714 	else if (vi->big_packets)
715 		give_pages(rq, buf);
716 	else
717 		put_page(virt_to_head_page(buf));
718 }
719 
720 static void enable_rx_mode_work(struct virtnet_info *vi)
721 {
722 	rtnl_lock();
723 	vi->rx_mode_work_enabled = true;
724 	rtnl_unlock();
725 }
726 
727 static void disable_rx_mode_work(struct virtnet_info *vi)
728 {
729 	rtnl_lock();
730 	vi->rx_mode_work_enabled = false;
731 	rtnl_unlock();
732 }
733 
734 static void virtqueue_napi_schedule(struct napi_struct *napi,
735 				    struct virtqueue *vq)
736 {
737 	if (napi_schedule_prep(napi)) {
738 		virtqueue_disable_cb(vq);
739 		__napi_schedule(napi);
740 	}
741 }
742 
743 static bool virtqueue_napi_complete(struct napi_struct *napi,
744 				    struct virtqueue *vq, int processed)
745 {
746 	int opaque;
747 
748 	opaque = virtqueue_enable_cb_prepare(vq);
749 	if (napi_complete_done(napi, processed)) {
750 		if (unlikely(virtqueue_poll(vq, opaque)))
751 			virtqueue_napi_schedule(napi, vq);
752 		else
753 			return true;
754 	} else {
755 		virtqueue_disable_cb(vq);
756 	}
757 
758 	return false;
759 }
760 
761 static void virtnet_tx_wake_queue(struct virtnet_info *vi,
762 				struct send_queue *sq)
763 {
764 	unsigned int index = vq2txq(sq->vq);
765 	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
766 
767 	if (netif_tx_queue_stopped(txq)) {
768 		u64_stats_update_begin(&sq->stats.syncp);
769 		u64_stats_inc(&sq->stats.wake);
770 		u64_stats_update_end(&sq->stats.syncp);
771 		netif_tx_wake_queue(txq);
772 	}
773 }
774 
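/* TX completion callback: suppress further interrupts and either kick the
 * tx NAPI (napi_tx mode) or wake the stopped queue directly.
 */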
775 static void skb_xmit_done(struct virtqueue *vq)
776 {
777 	struct virtnet_info *vi = vq->vdev->priv;
778 	unsigned int index = vq2txq(vq);
779 	struct send_queue *sq = &vi->sq[index];
780 	struct napi_struct *napi = &sq->napi;
781 
782 	/* Suppress further interrupts. */
783 	virtqueue_disable_cb(vq);
784 
785 	if (napi->weight)
786 		virtqueue_napi_schedule(napi, vq);
787 	else
788 		virtnet_tx_wake_queue(vi, sq);
789 }
790 
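/* For mergeable receive buffers, truesize and headroom are packed into a
 * single pointer-sized context: the low MRG_CTX_HEADER_SHIFT bits hold the
 * truesize and the remaining high bits hold the headroom.
 */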
791 #define MRG_CTX_HEADER_SHIFT 22
792 static void *mergeable_len_to_ctx(unsigned int truesize,
793 				  unsigned int headroom)
794 {
795 	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
796 }
797 
798 static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
799 {
800 	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
801 }
802 
803 static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
804 {
805 	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
806 }
807 
808 static int check_mergeable_len(struct net_device *dev, void *mrg_ctx,
809 			       unsigned int len)
810 {
811 	unsigned int headroom, tailroom, room, truesize;
812 
813 	truesize = mergeable_ctx_to_truesize(mrg_ctx);
814 	headroom = mergeable_ctx_to_headroom(mrg_ctx);
815 	tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
816 	room = SKB_DATA_ALIGN(headroom + tailroom);
817 
818 	if (len > truesize - room) {
819 		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
820 			 dev->name, len, (unsigned long)(truesize - room));
821 		DEV_STATS_INC(dev, rx_length_errors);
822 		return -1;
823 	}
824 
825 	return 0;
826 }
827 
828 static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
829 					 unsigned int headroom,
830 					 unsigned int len)
831 {
832 	struct sk_buff *skb;
833 
834 	skb = build_skb(buf, buflen);
835 	if (unlikely(!skb))
836 		return NULL;
837 
838 	skb_reserve(skb, headroom);
839 	skb_put(skb, len);
840 
841 	return skb;
842 }
843 
844 /* Called from bottom half context */
845 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
846 				   struct receive_queue *rq,
847 				   struct page *page, unsigned int offset,
848 				   unsigned int len, unsigned int truesize,
849 				   unsigned int headroom)
850 {
851 	struct sk_buff *skb;
852 	struct virtio_net_common_hdr *hdr;
853 	unsigned int copy, hdr_len, hdr_padded_len;
854 	struct page *page_to_free = NULL;
855 	int tailroom, shinfo_size;
856 	char *p, *hdr_p, *buf;
857 
858 	p = page_address(page) + offset;
859 	hdr_p = p;
860 
861 	hdr_len = vi->hdr_len;
862 	if (vi->mergeable_rx_bufs)
863 		hdr_padded_len = hdr_len;
864 	else
865 		hdr_padded_len = sizeof(struct padded_vnet_hdr);
866 
867 	buf = p - headroom;
868 	len -= hdr_len;
869 	offset += hdr_padded_len;
870 	p += hdr_padded_len;
871 	tailroom = truesize - headroom  - hdr_padded_len - len;
872 
873 	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
874 
875 	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
876 		skb = virtnet_build_skb(buf, truesize, p - buf, len);
877 		if (unlikely(!skb))
878 			return NULL;
879 
880 		page = (struct page *)page->private;
881 		if (page)
882 			give_pages(rq, page);
883 		goto ok;
884 	}
885 
886 	/* copy small packet so we can reuse these pages for small data */
887 	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
888 	if (unlikely(!skb))
889 		return NULL;
890 
891 	/* Copy the whole frame if it fits in skb->head; otherwise copy just
892 	 * ETH_HLEN and let virtio_net_hdr_to_skb() and GRO pull headers as needed.
893 	 */
894 	if (len <= skb_tailroom(skb))
895 		copy = len;
896 	else
897 		copy = ETH_HLEN;
898 	skb_put_data(skb, p, copy);
899 
900 	len -= copy;
901 	offset += copy;
902 
903 	if (vi->mergeable_rx_bufs) {
904 		if (len)
905 			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
906 		else
907 			page_to_free = page;
908 		goto ok;
909 	}
910 
911 	BUG_ON(offset >= PAGE_SIZE);
912 	while (len) {
913 		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
914 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
915 				frag_size, truesize);
916 		len -= frag_size;
917 		page = (struct page *)page->private;
918 		offset = 0;
919 	}
920 
921 	if (page)
922 		give_pages(rq, page);
923 
924 ok:
925 	hdr = skb_vnet_common_hdr(skb);
926 	memcpy(hdr, hdr_p, hdr_len);
927 	if (page_to_free)
928 		put_page(page_to_free);
929 
930 	return skb;
931 }
932 
933 static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
934 {
935 	struct virtnet_info *vi = rq->vq->vdev->priv;
936 	struct page *page = virt_to_head_page(buf);
937 	struct virtnet_rq_dma *dma;
938 	void *head;
939 	int offset;
940 
941 	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
942 
943 	head = page_address(page);
944 
945 	dma = head;
946 
947 	--dma->ref;
948 
949 	if (dma->need_sync && len) {
950 		offset = buf - (head + sizeof(*dma));
951 
952 		virtqueue_map_sync_single_range_for_cpu(rq->vq, dma->addr,
953 							offset, len,
954 							DMA_FROM_DEVICE);
955 	}
956 
957 	if (dma->ref)
958 		return;
959 
960 	virtqueue_unmap_single_attrs(rq->vq, dma->addr, dma->len,
961 				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
962 	put_page(page);
963 }
964 
965 static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
966 {
967 	struct virtnet_info *vi = rq->vq->vdev->priv;
968 	void *buf;
969 
970 	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
971 
972 	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
973 	if (buf)
974 		virtnet_rq_unmap(rq, buf, *len);
975 
976 	return buf;
977 }
978 
979 static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
980 {
981 	struct virtnet_info *vi = rq->vq->vdev->priv;
982 	struct virtnet_rq_dma *dma;
983 	dma_addr_t addr;
984 	u32 offset;
985 	void *head;
986 
987 	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
988 
989 	head = page_address(rq->alloc_frag.page);
990 
991 	offset = buf - head;
992 
993 	dma = head;
994 
995 	addr = dma->addr - sizeof(*dma) + offset;
996 
997 	sg_init_table(rq->sg, 1);
998 	sg_fill_dma(rq->sg, addr, len);
999 }
1000 
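/* Carve a receive buffer out of the page frag. A struct virtnet_rq_dma at the
 * start of each frag page holds the DMA mapping for the whole page; every
 * buffer taken from the page bumps dma->ref, and the page is unmapped only
 * after all of its buffers have been returned (see virtnet_rq_unmap()).
 */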
1001 static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
1002 {
1003 	struct page_frag *alloc_frag = &rq->alloc_frag;
1004 	struct virtnet_info *vi = rq->vq->vdev->priv;
1005 	struct virtnet_rq_dma *dma;
1006 	void *buf, *head;
1007 	dma_addr_t addr;
1008 
1009 	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
1010 
1011 	head = page_address(alloc_frag->page);
1012 
1013 	dma = head;
1014 
1015 	/* new pages */
1016 	if (!alloc_frag->offset) {
1017 		if (rq->last_dma) {
1018 			/* Now that the new page is allocated, the last dma
1019 			 * will not be used any more, so it can be unmapped
1020 			 * once its ref drops to 0.
1021 			 */
1022 			virtnet_rq_unmap(rq, rq->last_dma, 0);
1023 			rq->last_dma = NULL;
1024 		}
1025 
1026 		dma->len = alloc_frag->size - sizeof(*dma);
1027 
1028 		addr = virtqueue_map_single_attrs(rq->vq, dma + 1,
1029 						  dma->len, DMA_FROM_DEVICE, 0);
1030 		if (virtqueue_map_mapping_error(rq->vq, addr))
1031 			return NULL;
1032 
1033 		dma->addr = addr;
1034 		dma->need_sync = virtqueue_map_need_sync(rq->vq, addr);
1035 
1036 		/* Add a reference to dma to prevent the entire dma from
1037 		 * being released during error handling. This reference
1038 		 * will be freed after the pages are no longer used.
1039 		 */
1040 		get_page(alloc_frag->page);
1041 		dma->ref = 1;
1042 		alloc_frag->offset = sizeof(*dma);
1043 
1044 		rq->last_dma = dma;
1045 	}
1046 
1047 	++dma->ref;
1048 
1049 	buf = head + alloc_frag->offset;
1050 
1051 	get_page(alloc_frag->page);
1052 	alloc_frag->offset += size;
1053 
1054 	return buf;
1055 }
1056 
1057 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
1058 {
1059 	struct virtnet_info *vi = vq->vdev->priv;
1060 	struct receive_queue *rq;
1061 	int i = vq2rxq(vq);
1062 
1063 	rq = &vi->rq[i];
1064 
1065 	if (rq->xsk_pool) {
1066 		xsk_buff_free((struct xdp_buff *)buf);
1067 		return;
1068 	}
1069 
1070 	if (!vi->big_packets || vi->mergeable_rx_bufs)
1071 		virtnet_rq_unmap(rq, buf, 0);
1072 
1073 	virtnet_rq_free_buf(vi, rq, buf);
1074 }
1075 
1076 static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
1077 			  bool in_napi)
1078 {
1079 	struct virtnet_sq_free_stats stats = {0};
1080 
1081 	virtnet_free_old_xmit(sq, txq, in_napi, &stats);
1082 
1083 	/* Avoid the accounting overhead when no packets have been processed;
1084 	 * this happens when called speculatively from start_xmit.
1085 	 */
1086 	if (!stats.packets && !stats.napi_packets)
1087 		return;
1088 
1089 	u64_stats_update_begin(&sq->stats.syncp);
1090 	u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes);
1091 	u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets);
1092 	u64_stats_update_end(&sq->stats.syncp);
1093 }
1094 
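/* Return true if tx queue @q is one of the xdp_queue_pairs queues at the end
 * of the currently used range, i.e. a queue that may carry raw XDP buffers
 * rather than skbs.
 */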
1095 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1096 {
1097 	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1098 		return false;
1099 	else if (q < vi->curr_queue_pairs)
1100 		return true;
1101 	else
1102 		return false;
1103 }
1104 
1105 static bool tx_may_stop(struct virtnet_info *vi,
1106 			struct net_device *dev,
1107 			struct send_queue *sq)
1108 {
1109 	int qnum;
1110 
1111 	qnum = sq - vi->sq;
1112 
1113 	/* If running out of space, stop queue to avoid getting packets that we
1114 	 * are then unable to transmit.
1115 	 * An alternative would be to force queuing layer to requeue the skb by
1116 	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
1117 	 * returned in a normal path of operation: it means that driver is not
1118 	 * maintaining the TX queue stop/start state properly, and causes
1119 	 * the stack to do a non-trivial amount of useless work.
1120 	 * Since most packets only take 1 or 2 ring slots, stopping the queue
1121 	 * early means 16 slots are typically wasted.
1122 	 */
1123 	if (sq->vq->num_free < MAX_SKB_FRAGS + 2) {
1124 		struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1125 
1126 		netif_tx_stop_queue(txq);
1127 		u64_stats_update_begin(&sq->stats.syncp);
1128 		u64_stats_inc(&sq->stats.stop);
1129 		u64_stats_update_end(&sq->stats.syncp);
1130 
1131 		return true;
1132 	}
1133 
1134 	return false;
1135 }
1136 
1137 static void check_sq_full_and_disable(struct virtnet_info *vi,
1138 				      struct net_device *dev,
1139 				      struct send_queue *sq)
1140 {
1141 	bool use_napi = sq->napi.weight;
1142 	int qnum;
1143 
1144 	qnum = sq - vi->sq;
1145 
1146 	if (tx_may_stop(vi, dev, sq)) {
1147 		struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1148 
1149 		if (use_napi) {
1150 			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
1151 				virtqueue_napi_schedule(&sq->napi, sq->vq);
1152 		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1153 			/* More just got used, free them then recheck. */
1154 			free_old_xmit(sq, txq, false);
1155 			if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
1156 				netif_start_subqueue(dev, qnum);
1157 				u64_stats_update_begin(&sq->stats.syncp);
1158 				u64_stats_inc(&sq->stats.wake);
1159 				u64_stats_update_end(&sq->stats.syncp);
1160 				virtqueue_disable_cb(sq->vq);
1161 			}
1162 		}
1163 	}
1164 }
1165 
1166 /* Note that @len is the length of received data without virtio header */
1167 static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
1168 				   struct receive_queue *rq, void *buf,
1169 				   u32 len, bool first_buf)
1170 {
1171 	struct xdp_buff *xdp;
1172 	u32 bufsize;
1173 
1174 	xdp = (struct xdp_buff *)buf;
1175 
1176 	/* In virtnet_add_recvbuf_xsk, we use part of XDP_PACKET_HEADROOM for
1177 	 * the virtio header and ask the vhost to fill data from
1178 	 *         hard_start + XDP_PACKET_HEADROOM - vi->hdr_len
1179 	 * The first buffer has the virtio header, so the remaining region for
1180 	 * frame data is
1181 	 *         xsk_pool_get_rx_frame_size()
1182 	 * Buffers other than the first one do not carry the virtio header, so
1183 	 * their maximum frame data length is
1184 	 *         xsk_pool_get_rx_frame_size() + vi->hdr_len
1185 	 */
1186 	bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool);
1187 	if (!first_buf)
1188 		bufsize += vi->hdr_len;
1189 
1190 	if (unlikely(len > bufsize)) {
1191 		pr_debug("%s: rx error: len %u exceeds truesize %u\n",
1192 			 vi->dev->name, len, bufsize);
1193 		DEV_STATS_INC(vi->dev, rx_length_errors);
1194 		xsk_buff_free(xdp);
1195 		return NULL;
1196 	}
1197 
1198 	if (first_buf) {
1199 		xsk_buff_set_size(xdp, len);
1200 	} else {
1201 		xdp_prepare_buff(xdp, xdp->data_hard_start,
1202 				 XDP_PACKET_HEADROOM - vi->hdr_len, len, 1);
1203 		xdp->flags = 0;
1204 	}
1205 
1206 	xsk_buff_dma_sync_for_cpu(xdp);
1207 
1208 	return xdp;
1209 }
1210 
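/* Copy an XSK rx buffer (including any XDP metadata) into a freshly allocated
 * skb so the xsk buffer can be returned to the pool immediately.
 */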
1211 static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
1212 					 struct xdp_buff *xdp)
1213 {
1214 	unsigned int metasize = xdp->data - xdp->data_meta;
1215 	struct sk_buff *skb;
1216 	unsigned int size;
1217 
1218 	size = xdp->data_end - xdp->data_hard_start;
1219 	skb = napi_alloc_skb(&rq->napi, size);
1220 	if (unlikely(!skb)) {
1221 		xsk_buff_free(xdp);
1222 		return NULL;
1223 	}
1224 
1225 	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
1226 
1227 	size = xdp->data_end - xdp->data_meta;
1228 	memcpy(__skb_put(skb, size), xdp->data_meta, size);
1229 
1230 	if (metasize) {
1231 		__skb_pull(skb, metasize);
1232 		skb_metadata_set(skb, metasize);
1233 	}
1234 
1235 	xsk_buff_free(xdp);
1236 
1237 	return skb;
1238 }
1239 
1240 static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
1241 						 struct receive_queue *rq, struct xdp_buff *xdp,
1242 						 unsigned int *xdp_xmit,
1243 						 struct virtnet_rq_stats *stats)
1244 {
1245 	struct bpf_prog *prog;
1246 	u32 ret;
1247 
1248 	ret = XDP_PASS;
1249 	rcu_read_lock();
1250 	prog = rcu_dereference(rq->xdp_prog);
1251 	if (prog)
1252 		ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
1253 	rcu_read_unlock();
1254 
1255 	switch (ret) {
1256 	case XDP_PASS:
1257 		return xsk_construct_skb(rq, xdp);
1258 
1259 	case XDP_TX:
1260 	case XDP_REDIRECT:
1261 		return NULL;
1262 
1263 	default:
1264 		/* drop packet */
1265 		xsk_buff_free(xdp);
1266 		u64_stats_inc(&stats->drops);
1267 		return NULL;
1268 	}
1269 }
1270 
1271 static void xsk_drop_follow_bufs(struct net_device *dev,
1272 				 struct receive_queue *rq,
1273 				 u32 num_buf,
1274 				 struct virtnet_rq_stats *stats)
1275 {
1276 	struct xdp_buff *xdp;
1277 	u32 len;
1278 
1279 	while (num_buf-- > 1) {
1280 		xdp = virtqueue_get_buf(rq->vq, &len);
1281 		if (unlikely(!xdp)) {
1282 			pr_debug("%s: rx error: %d buffers missing\n",
1283 				 dev->name, num_buf);
1284 			DEV_STATS_INC(dev, rx_length_errors);
1285 			break;
1286 		}
1287 		u64_stats_add(&stats->bytes, len);
1288 		xsk_buff_free(xdp);
1289 	}
1290 }
1291 
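/* For mergeable XSK receive, copy each follow-up buffer into a page frag and
 * attach it to the head skb, releasing the xsk buffers as we go.
 */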
1292 static int xsk_append_merge_buffer(struct virtnet_info *vi,
1293 				   struct receive_queue *rq,
1294 				   struct sk_buff *head_skb,
1295 				   u32 num_buf,
1296 				   struct virtio_net_hdr_mrg_rxbuf *hdr,
1297 				   struct virtnet_rq_stats *stats)
1298 {
1299 	struct sk_buff *curr_skb;
1300 	struct xdp_buff *xdp;
1301 	u32 len, truesize;
1302 	struct page *page;
1303 	void *buf;
1304 
1305 	curr_skb = head_skb;
1306 
1307 	while (--num_buf) {
1308 		buf = virtqueue_get_buf(rq->vq, &len);
1309 		if (unlikely(!buf)) {
1310 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
1311 				 vi->dev->name, num_buf,
1312 				 virtio16_to_cpu(vi->vdev,
1313 						 hdr->num_buffers));
1314 			DEV_STATS_INC(vi->dev, rx_length_errors);
1315 			return -EINVAL;
1316 		}
1317 
1318 		u64_stats_add(&stats->bytes, len);
1319 
1320 		xdp = buf_to_xdp(vi, rq, buf, len, false);
1321 		if (!xdp)
1322 			goto err;
1323 
1324 		buf = napi_alloc_frag(len);
1325 		if (!buf) {
1326 			xsk_buff_free(xdp);
1327 			goto err;
1328 		}
1329 
1330 		memcpy(buf, xdp->data, len);
1331 
1332 		xsk_buff_free(xdp);
1333 
1334 		page = virt_to_page(buf);
1335 
1336 		truesize = len;
1337 
1338 		curr_skb  = virtnet_skb_append_frag(head_skb, curr_skb, page,
1339 						    buf, len, truesize);
1340 		if (!curr_skb) {
1341 			put_page(page);
1342 			goto err;
1343 		}
1344 	}
1345 
1346 	return 0;
1347 
1348 err:
1349 	xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
1350 	return -EINVAL;
1351 }
1352 
1353 static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
1354 						 struct receive_queue *rq, struct xdp_buff *xdp,
1355 						 unsigned int *xdp_xmit,
1356 						 struct virtnet_rq_stats *stats)
1357 {
1358 	struct virtio_net_hdr_mrg_rxbuf *hdr;
1359 	struct bpf_prog *prog;
1360 	struct sk_buff *skb;
1361 	u32 ret, num_buf;
1362 
1363 	hdr = xdp->data - vi->hdr_len;
1364 	num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1365 
1366 	ret = XDP_PASS;
1367 	rcu_read_lock();
1368 	prog = rcu_dereference(rq->xdp_prog);
1369 	if (prog) {
1370 		/* TODO: support multi buffer. */
1371 		if (num_buf == 1)
1372 			ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit,
1373 						  stats);
1374 		else
1375 			ret = XDP_ABORTED;
1376 	}
1377 	rcu_read_unlock();
1378 
1379 	switch (ret) {
1380 	case XDP_PASS:
1381 		skb = xsk_construct_skb(rq, xdp);
1382 		if (!skb)
1383 			goto drop_bufs;
1384 
1385 		if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
1386 			dev_kfree_skb(skb);
1387 			goto drop;
1388 		}
1389 
1390 		return skb;
1391 
1392 	case XDP_TX:
1393 	case XDP_REDIRECT:
1394 		return NULL;
1395 
1396 	default:
1397 		/* drop packet */
1398 		xsk_buff_free(xdp);
1399 	}
1400 
1401 drop_bufs:
1402 	xsk_drop_follow_bufs(dev, rq, num_buf, stats);
1403 
1404 drop:
1405 	u64_stats_inc(&stats->drops);
1406 	return NULL;
1407 }
1408 
1409 static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
1410 				    void *buf, u32 len,
1411 				    unsigned int *xdp_xmit,
1412 				    struct virtnet_rq_stats *stats)
1413 {
1414 	struct net_device *dev = vi->dev;
1415 	struct sk_buff *skb = NULL;
1416 	struct xdp_buff *xdp;
1417 	u8 flags;
1418 
1419 	len -= vi->hdr_len;
1420 
1421 	u64_stats_add(&stats->bytes, len);
1422 
1423 	xdp = buf_to_xdp(vi, rq, buf, len, true);
1424 	if (!xdp)
1425 		return;
1426 
1427 	if (unlikely(len < ETH_HLEN)) {
1428 		pr_debug("%s: short packet %i\n", dev->name, len);
1429 		DEV_STATS_INC(dev, rx_length_errors);
1430 		xsk_buff_free(xdp);
1431 		return;
1432 	}
1433 
1434 	flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags;
1435 
1436 	if (!vi->mergeable_rx_bufs)
1437 		skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
1438 	else
1439 		skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);
1440 
1441 	if (skb)
1442 		virtnet_receive_done(vi, rq, skb, flags);
1443 }
1444 
1445 static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
1446 				   struct xsk_buff_pool *pool, gfp_t gfp)
1447 {
1448 	struct xdp_buff **xsk_buffs;
1449 	dma_addr_t addr;
1450 	int err = 0;
1451 	u32 len, i;
1452 	int num;
1453 
1454 	xsk_buffs = rq->xsk_buffs;
1455 
1456 	num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
1457 	if (!num) {
1458 		if (xsk_uses_need_wakeup(pool)) {
1459 			xsk_set_rx_need_wakeup(pool);
1460 			/* Return 0 instead of -ENOMEM so that NAPI is
1461 			 * descheduled.
1462 			 */
1463 			return 0;
1464 		}
1465 
1466 		return -ENOMEM;
1467 	} else {
1468 		xsk_clear_rx_need_wakeup(pool);
1469 	}
1470 
1471 	len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
1472 
1473 	for (i = 0; i < num; ++i) {
1474 		/* Use part of XDP_PACKET_HEADROOM as the virtnet hdr space.
1475 		 * We assume XDP_PACKET_HEADROOM is larger than the virtio header length
1476 		 * (see virtnet_xsk_pool_enable()).
1477 		 */
1478 		addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
1479 
1480 		sg_init_table(rq->sg, 1);
1481 		sg_fill_dma(rq->sg, addr, len);
1482 
1483 		err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1,
1484 						    xsk_buffs[i], NULL, gfp);
1485 		if (err)
1486 			goto err;
1487 	}
1488 
1489 	return num;
1490 
1491 err:
1492 	for (; i < num; ++i)
1493 		xsk_buff_free(xsk_buffs[i]);
1494 
1495 	return err;
1496 }
1497 
1498 static void *virtnet_xsk_to_ptr(u32 len)
1499 {
1500 	unsigned long p;
1501 
1502 	p = len << VIRTIO_XSK_FLAG_OFFSET;
1503 
1504 	return virtnet_xmit_ptr_pack((void *)p, VIRTNET_XMIT_TYPE_XSK);
1505 }
1506 
1507 static int virtnet_xsk_xmit_one(struct send_queue *sq,
1508 				struct xsk_buff_pool *pool,
1509 				struct xdp_desc *desc)
1510 {
1511 	struct virtnet_info *vi;
1512 	dma_addr_t addr;
1513 
1514 	vi = sq->vq->vdev->priv;
1515 
1516 	addr = xsk_buff_raw_get_dma(pool, desc->addr);
1517 	xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);
1518 
1519 	sg_init_table(sq->sg, 2);
1520 	sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);
1521 	sg_fill_dma(sq->sg + 1, addr, desc->len);
1522 
1523 	return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2,
1524 					      virtnet_xsk_to_ptr(desc->len),
1525 					      GFP_ATOMIC);
1526 }
1527 
1528 static int virtnet_xsk_xmit_batch(struct send_queue *sq,
1529 				  struct xsk_buff_pool *pool,
1530 				  unsigned int budget,
1531 				  u64 *kicks)
1532 {
1533 	struct xdp_desc *descs = pool->tx_descs;
1534 	bool kick = false;
1535 	u32 nb_pkts, i;
1536 	int err;
1537 
1538 	budget = min_t(u32, budget, sq->vq->num_free);
1539 
1540 	nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
1541 	if (!nb_pkts)
1542 		return 0;
1543 
1544 	for (i = 0; i < nb_pkts; i++) {
1545 		err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
1546 		if (unlikely(err)) {
1547 			xsk_tx_completed(sq->xsk_pool, nb_pkts - i);
1548 			break;
1549 		}
1550 
1551 		kick = true;
1552 	}
1553 
1554 	if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1555 		(*kicks)++;
1556 
1557 	return i;
1558 }
1559 
1560 static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
1561 			     int budget)
1562 {
1563 	struct virtnet_info *vi = sq->vq->vdev->priv;
1564 	struct virtnet_sq_free_stats stats = {};
1565 	struct net_device *dev = vi->dev;
1566 	u64 kicks = 0;
1567 	int sent;
1568 
1569 	/* Avoid waking up the napi needlessly, so call __free_old_xmit() instead
1570 	 * of free_old_xmit().
1571 	 */
1572 	__free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats);
1573 
1574 	if (stats.xsk)
1575 		xsk_tx_completed(sq->xsk_pool, stats.xsk);
1576 
1577 	sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
1578 
1579 	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1580 		check_sq_full_and_disable(vi, vi->dev, sq);
1581 
1582 	if (sent) {
1583 		struct netdev_queue *txq;
1584 
1585 		txq = netdev_get_tx_queue(vi->dev, sq - vi->sq);
1586 		txq_trans_cond_update(txq);
1587 	}
1588 
1589 	u64_stats_update_begin(&sq->stats.syncp);
1590 	u64_stats_add(&sq->stats.packets, stats.packets);
1591 	u64_stats_add(&sq->stats.bytes,   stats.bytes);
1592 	u64_stats_add(&sq->stats.kicks,   kicks);
1593 	u64_stats_add(&sq->stats.xdp_tx,  sent);
1594 	u64_stats_update_end(&sq->stats.syncp);
1595 
1596 	if (xsk_uses_need_wakeup(pool))
1597 		xsk_set_tx_need_wakeup(pool);
1598 
1599 	return sent;
1600 }
1601 
1602 static void xsk_wakeup(struct napi_struct *napi, struct virtqueue *vq)
1603 {
1604 	if (napi_if_scheduled_mark_missed(napi))
1605 		return;
1606 
1607 	local_bh_disable();
1608 	virtqueue_napi_schedule(napi, vq);
1609 	local_bh_enable();
1610 }
1611 
1612 static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
1613 {
1614 	struct virtnet_info *vi = netdev_priv(dev);
1615 
1616 	if (!netif_running(dev))
1617 		return -ENETDOWN;
1618 
1619 	if (qid >= vi->curr_queue_pairs)
1620 		return -EINVAL;
1621 
1622 	if (flag & XDP_WAKEUP_TX) {
1623 		struct send_queue *sq = &vi->sq[qid];
1624 
1625 		xsk_wakeup(&sq->napi, sq->vq);
1626 	}
1627 
1628 	if (flag & XDP_WAKEUP_RX) {
1629 		struct receive_queue *rq = &vi->rq[qid];
1630 
1631 		xsk_wakeup(&rq->napi, rq->vq);
1632 	}
1633 
1634 	return 0;
1635 }
1636 
1637 static void virtnet_xsk_completed(struct send_queue *sq, int num)
1638 {
1639 	xsk_tx_completed(sq->xsk_pool, num);
1640 
1641 	/* If this is called from rx poll, start_xmit or xdp xmit, we should
1642 	 * wake up the tx napi to consume the xsk tx queue, because the tx
1643 	 * interrupt may not be triggered.
1644 	 */
1645 	xsk_wakeup(&sq->napi, sq->vq);
1646 }
1647 
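/* Prepend the virtio-net header to an XDP frame and queue it on the send
 * virtqueue, with the linear part and any frags mapped as separate sg entries.
 */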
1648 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
1649 				   struct send_queue *sq,
1650 				   struct xdp_frame *xdpf)
1651 {
1652 	struct virtio_net_hdr_mrg_rxbuf *hdr;
1653 	struct skb_shared_info *shinfo;
1654 	u8 nr_frags = 0;
1655 	int err, i;
1656 
1657 	if (unlikely(xdpf->headroom < vi->hdr_len))
1658 		return -EOVERFLOW;
1659 
1660 	if (unlikely(xdp_frame_has_frags(xdpf))) {
1661 		shinfo = xdp_get_shared_info_from_frame(xdpf);
1662 		nr_frags = shinfo->nr_frags;
1663 	}
1664 
1665 	/* In the wrapping function virtnet_xdp_xmit(), we need to free
1666 	 * up the pending old buffers, where xdp_get_frame_len() and
1667 	 * xdp_return_frame() have to locate skb_shared_info, which depends
1668 	 * on xdpf->data and xdpf->headroom. Therefore, we need to update
1669 	 * the value of headroom synchronously here, in step with the data
1670 	 * pointer adjustment below.
1671 	 */
1672 	xdpf->headroom -= vi->hdr_len;
1673 	xdpf->data -= vi->hdr_len;
1674 	/* Zero header and leave csum up to XDP layers */
1675 	hdr = xdpf->data;
1676 	memset(hdr, 0, vi->hdr_len);
1677 	xdpf->len   += vi->hdr_len;
1678 
1679 	sg_init_table(sq->sg, nr_frags + 1);
1680 	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
1681 	for (i = 0; i < nr_frags; i++) {
1682 		skb_frag_t *frag = &shinfo->frags[i];
1683 
1684 		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
1685 			    skb_frag_size(frag), skb_frag_off(frag));
1686 	}
1687 
1688 	err = virtnet_add_outbuf(sq, nr_frags + 1, xdpf, VIRTNET_XMIT_TYPE_XDP);
1689 	if (unlikely(err))
1690 		return -ENOSPC; /* Caller handle free/refcnt */
1691 
1692 	return 0;
1693 }
1694 
1695 /* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1696  * the current cpu, so it does not need to be locked.
1697  *
1698  * Here we use a macro instead of inline functions because we have to deal with
1699  * three issues at the same time: 1. the choice of sq; 2. deciding whether to
1700  * lock/unlock the txq; 3. making sparse happy. It is difficult for two inline
1701  * functions to solve these three problems at the same time.
1702  */
1703 #define virtnet_xdp_get_sq(vi) ({                                       \
1704 	int cpu = smp_processor_id();                                   \
1705 	struct netdev_queue *txq;                                       \
1706 	typeof(vi) v = (vi);                                            \
1707 	unsigned int qp;                                                \
1708 									\
1709 	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
1710 		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
1711 		qp += cpu;                                              \
1712 		txq = netdev_get_tx_queue(v->dev, qp);                  \
1713 		__netif_tx_acquire(txq);                                \
1714 	} else {                                                        \
1715 		qp = cpu % v->curr_queue_pairs;                         \
1716 		txq = netdev_get_tx_queue(v->dev, qp);                  \
1717 		__netif_tx_lock(txq, cpu);                              \
1718 	}                                                               \
1719 	v->sq + qp;                                                     \
1720 })
1721 
1722 #define virtnet_xdp_put_sq(vi, q) {                                     \
1723 	struct netdev_queue *txq;                                       \
1724 	typeof(vi) v = (vi);                                            \
1725 									\
1726 	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
1727 	if (v->curr_queue_pairs > nr_cpu_ids)                           \
1728 		__netif_tx_release(txq);                                \
1729 	else                                                            \
1730 		__netif_tx_unlock(txq);                                 \
1731 }
1732 
1733 static int virtnet_xdp_xmit(struct net_device *dev,
1734 			    int n, struct xdp_frame **frames, u32 flags)
1735 {
1736 	struct virtnet_info *vi = netdev_priv(dev);
1737 	struct virtnet_sq_free_stats stats = {0};
1738 	struct receive_queue *rq = vi->rq;
1739 	struct bpf_prog *xdp_prog;
1740 	struct send_queue *sq;
1741 	int nxmit = 0;
1742 	int kicks = 0;
1743 	int ret;
1744 	int i;
1745 
1746 	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
1747 	 * indicates that XDP resources have been successfully allocated.
1748 	 */
1749 	xdp_prog = rcu_access_pointer(rq->xdp_prog);
1750 	if (!xdp_prog)
1751 		return -ENXIO;
1752 
1753 	sq = virtnet_xdp_get_sq(vi);
1754 
1755 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
1756 		ret = -EINVAL;
1757 		goto out;
1758 	}
1759 
1760 	/* Free up any pending old buffers before queueing new ones. */
1761 	virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
1762 			      false, &stats);
1763 
1764 	for (i = 0; i < n; i++) {
1765 		struct xdp_frame *xdpf = frames[i];
1766 
1767 		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
1768 			break;
1769 		nxmit++;
1770 	}
1771 	ret = nxmit;
1772 
1773 	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1774 		check_sq_full_and_disable(vi, dev, sq);
1775 
1776 	if (flags & XDP_XMIT_FLUSH) {
1777 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1778 			kicks = 1;
1779 	}
1780 out:
1781 	u64_stats_update_begin(&sq->stats.syncp);
1782 	u64_stats_add(&sq->stats.bytes, stats.bytes);
1783 	u64_stats_add(&sq->stats.packets, stats.packets);
1784 	u64_stats_add(&sq->stats.xdp_tx, n);
1785 	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
1786 	u64_stats_add(&sq->stats.kicks, kicks);
1787 	u64_stats_update_end(&sq->stats.syncp);
1788 
1789 	virtnet_xdp_put_sq(vi, sq);
1790 	return ret;
1791 }
1792 
1793 static void put_xdp_frags(struct xdp_buff *xdp)
1794 {
1795 	struct skb_shared_info *shinfo;
1796 	struct page *xdp_page;
1797 	int i;
1798 
1799 	if (xdp_buff_has_frags(xdp)) {
1800 		shinfo = xdp_get_shared_info_from_buff(xdp);
1801 		for (i = 0; i < shinfo->nr_frags; i++) {
1802 			xdp_page = skb_frag_page(&shinfo->frags[i]);
1803 			put_page(xdp_page);
1804 		}
1805 	}
1806 }
1807 
1808 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
1809 			       struct net_device *dev,
1810 			       unsigned int *xdp_xmit,
1811 			       struct virtnet_rq_stats *stats)
1812 {
1813 	struct xdp_frame *xdpf;
1814 	int err;
1815 	u32 act;
1816 
1817 	act = bpf_prog_run_xdp(xdp_prog, xdp);
1818 	u64_stats_inc(&stats->xdp_packets);
1819 
1820 	switch (act) {
1821 	case XDP_PASS:
1822 		return act;
1823 
1824 	case XDP_TX:
1825 		u64_stats_inc(&stats->xdp_tx);
1826 		xdpf = xdp_convert_buff_to_frame(xdp);
1827 		if (unlikely(!xdpf)) {
1828 			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1829 			return XDP_DROP;
1830 		}
1831 
1832 		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1833 		if (unlikely(!err)) {
1834 			xdp_return_frame_rx_napi(xdpf);
1835 		} else if (unlikely(err < 0)) {
1836 			trace_xdp_exception(dev, xdp_prog, act);
1837 			return XDP_DROP;
1838 		}
1839 		*xdp_xmit |= VIRTIO_XDP_TX;
1840 		return act;
1841 
1842 	case XDP_REDIRECT:
1843 		u64_stats_inc(&stats->xdp_redirects);
1844 		err = xdp_do_redirect(dev, xdp, xdp_prog);
1845 		if (err)
1846 			return XDP_DROP;
1847 
1848 		*xdp_xmit |= VIRTIO_XDP_REDIR;
1849 		return act;
1850 
1851 	default:
1852 		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
1853 		fallthrough;
1854 	case XDP_ABORTED:
1855 		trace_xdp_exception(dev, xdp_prog, act);
1856 		fallthrough;
1857 	case XDP_DROP:
1858 		return XDP_DROP;
1859 	}
1860 }
1861 
1862 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1863 {
1864 	return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
1865 }
1866 
1867 /* We copy the packet for XDP in the following cases:
1868  *
1869  * 1) Packet is scattered across multiple rx buffers.
1870  * 2) Headroom space is insufficient.
1871  *
1872  * This is inefficient but it's a temporary condition that
1873  * we hit right after XDP is enabled and until the queue is refilled
1874  * with large buffers with sufficient headroom - so it should affect
1875  * at most queue size packets.
1876  * Afterwards, the conditions to enable
1877  * XDP should preclude the underlying device from sending packets
1878  * across multiple buffers (num_buf > 1), and we make sure buffers
1879  * have enough headroom.
1880  */
1881 static struct page *xdp_linearize_page(struct net_device *dev,
1882 				       struct receive_queue *rq,
1883 				       int *num_buf,
1884 				       struct page *p,
1885 				       int offset,
1886 				       int page_off,
1887 				       unsigned int *len)
1888 {
1889 	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1890 	struct page *page;
1891 
1892 	if (page_off + *len + tailroom > PAGE_SIZE)
1893 		return NULL;
1894 
1895 	page = alloc_page(GFP_ATOMIC);
1896 	if (!page)
1897 		return NULL;
1898 
1899 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
1900 	page_off += *len;
1901 
1902 	/* Only mergeable mode can enter this while loop. In small mode,
1903 	 * *num_buf == 1, so the loop body never runs.
1904 	 */
1905 	while (--*num_buf) {
1906 		unsigned int buflen;
1907 		void *buf;
1908 		void *ctx;
1909 		int off;
1910 
1911 		buf = virtnet_rq_get_buf(rq, &buflen, &ctx);
1912 		if (unlikely(!buf))
1913 			goto err_buf;
1914 
1915 		p = virt_to_head_page(buf);
1916 		off = buf - page_address(p);
1917 
1918 		if (check_mergeable_len(dev, ctx, buflen)) {
1919 			put_page(p);
1920 			goto err_buf;
1921 		}
1922 
1923 		/* guard against a misconfigured or uncooperative backend that
1924 		 * is sending packets larger than the MTU.
1925 		 */
1926 		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
1927 			put_page(p);
1928 			goto err_buf;
1929 		}
1930 
1931 		memcpy(page_address(page) + page_off,
1932 		       page_address(p) + off, buflen);
1933 		page_off += buflen;
1934 		put_page(p);
1935 	}
1936 
1937 	/* Headroom does not contribute to packet length */
1938 	*len = page_off - XDP_PACKET_HEADROOM;
1939 	return page;
1940 err_buf:
1941 	__free_pages(page, 0);
1942 	return NULL;
1943 }
1944 
1945 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1946 					       unsigned int xdp_headroom,
1947 					       void *buf,
1948 					       unsigned int len)
1949 {
1950 	unsigned int header_offset;
1951 	unsigned int headroom;
1952 	unsigned int buflen;
1953 	struct sk_buff *skb;
1954 
1955 	header_offset = VIRTNET_RX_PAD + xdp_headroom;
1956 	headroom = vi->hdr_len + header_offset;
1957 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1958 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1959 
1960 	skb = virtnet_build_skb(buf, buflen, headroom, len);
1961 	if (unlikely(!skb))
1962 		return NULL;
1963 
1964 	buf += header_offset;
1965 	memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1966 
1967 	return skb;
1968 }
1969 
1970 static struct sk_buff *receive_small_xdp(struct net_device *dev,
1971 					 struct virtnet_info *vi,
1972 					 struct receive_queue *rq,
1973 					 struct bpf_prog *xdp_prog,
1974 					 void *buf,
1975 					 unsigned int xdp_headroom,
1976 					 unsigned int len,
1977 					 unsigned int *xdp_xmit,
1978 					 struct virtnet_rq_stats *stats)
1979 {
1980 	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
1981 	unsigned int headroom = vi->hdr_len + header_offset;
1982 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1983 	struct page *page = virt_to_head_page(buf);
1984 	struct page *xdp_page;
1985 	unsigned int buflen;
1986 	struct xdp_buff xdp;
1987 	struct sk_buff *skb;
1988 	unsigned int metasize = 0;
1989 	u32 act;
1990 
1991 	if (unlikely(hdr->hdr.gso_type))
1992 		goto err_xdp;
1993 
1994 	/* Partially checksummed packets must be dropped. */
1995 	if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
1996 		goto err_xdp;
1997 
1998 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1999 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2000 
2001 	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
2002 		int offset = buf - page_address(page) + header_offset;
2003 		unsigned int tlen = len + vi->hdr_len;
2004 		int num_buf = 1;
2005 
2006 		xdp_headroom = virtnet_get_headroom(vi);
2007 		header_offset = VIRTNET_RX_PAD + xdp_headroom;
2008 		headroom = vi->hdr_len + header_offset;
2009 		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
2010 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2011 		xdp_page = xdp_linearize_page(dev, rq, &num_buf, page,
2012 					      offset, header_offset,
2013 					      &tlen);
2014 		if (!xdp_page)
2015 			goto err_xdp;
2016 
2017 		buf = page_address(xdp_page);
2018 		put_page(page);
2019 		page = xdp_page;
2020 	}
2021 
2022 	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
2023 	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
2024 			 xdp_headroom, len, true);
2025 
2026 	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
2027 
2028 	switch (act) {
2029 	case XDP_PASS:
2030 		/* Recalculate length in case bpf program changed it */
2031 		len = xdp.data_end - xdp.data;
2032 		metasize = xdp.data - xdp.data_meta;
2033 		break;
2034 
2035 	case XDP_TX:
2036 	case XDP_REDIRECT:
2037 		goto xdp_xmit;
2038 
2039 	default:
2040 		goto err_xdp;
2041 	}
2042 
2043 	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
2044 	if (unlikely(!skb))
2045 		goto err;
2046 
2047 	if (metasize)
2048 		skb_metadata_set(skb, metasize);
2049 
2050 	return skb;
2051 
2052 err_xdp:
2053 	u64_stats_inc(&stats->xdp_drops);
2054 err:
2055 	u64_stats_inc(&stats->drops);
2056 	put_page(page);
2057 xdp_xmit:
2058 	return NULL;
2059 }
2060 
2061 static struct sk_buff *receive_small(struct net_device *dev,
2062 				     struct virtnet_info *vi,
2063 				     struct receive_queue *rq,
2064 				     void *buf, void *ctx,
2065 				     unsigned int len,
2066 				     unsigned int *xdp_xmit,
2067 				     struct virtnet_rq_stats *stats)
2068 {
2069 	unsigned int xdp_headroom = (unsigned long)ctx;
2070 	struct page *page = virt_to_head_page(buf);
2071 	struct sk_buff *skb;
2072 
2073 	/* We passed the address of the virtio-net header to virtio-core,
2074 	 * so walk back over the padding here to recover the buffer start.
2075 	 */
2076 	buf -= VIRTNET_RX_PAD + xdp_headroom;
2077 
2078 	len -= vi->hdr_len;
2079 	u64_stats_add(&stats->bytes, len);
2080 
2081 	if (unlikely(len > GOOD_PACKET_LEN)) {
2082 		pr_debug("%s: rx error: len %u exceeds max size %d\n",
2083 			 dev->name, len, GOOD_PACKET_LEN);
2084 		DEV_STATS_INC(dev, rx_length_errors);
2085 		goto err;
2086 	}
2087 
2088 	if (unlikely(vi->xdp_enabled)) {
2089 		struct bpf_prog *xdp_prog;
2090 
2091 		rcu_read_lock();
2092 		xdp_prog = rcu_dereference(rq->xdp_prog);
2093 		if (xdp_prog) {
2094 			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
2095 						xdp_headroom, len, xdp_xmit,
2096 						stats);
2097 			rcu_read_unlock();
2098 			return skb;
2099 		}
2100 		rcu_read_unlock();
2101 	}
2102 
2103 	skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
2104 	if (likely(skb))
2105 		return skb;
2106 
2107 err:
2108 	u64_stats_inc(&stats->drops);
2109 	put_page(page);
2110 	return NULL;
2111 }
2112 
2113 static struct sk_buff *receive_big(struct net_device *dev,
2114 				   struct virtnet_info *vi,
2115 				   struct receive_queue *rq,
2116 				   void *buf,
2117 				   unsigned int len,
2118 				   struct virtnet_rq_stats *stats)
2119 {
2120 	struct page *page = buf;
2121 	struct sk_buff *skb;
2122 
2123 	/* Make sure that len does not exceed the size allocated in
2124 	 * add_recvbuf_big.
2125 	 */
2126 	if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) {
2127 		pr_debug("%s: rx error: len %u exceeds allocated size %lu\n",
2128 			 dev->name, len,
2129 			 (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE);
2130 		goto err;
2131 	}
2132 
2133 	skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
2134 	u64_stats_add(&stats->bytes, len - vi->hdr_len);
2135 	if (unlikely(!skb))
2136 		goto err;
2137 
2138 	return skb;
2139 
2140 err:
2141 	u64_stats_inc(&stats->drops);
2142 	give_pages(rq, page);
2143 	return NULL;
2144 }
2145 
2146 static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
2147 			       struct net_device *dev,
2148 			       struct virtnet_rq_stats *stats)
2149 {
2150 	struct page *page;
2151 	void *buf;
2152 	int len;
2153 
2154 	while (num_buf-- > 1) {
2155 		buf = virtnet_rq_get_buf(rq, &len, NULL);
2156 		if (unlikely(!buf)) {
2157 			pr_debug("%s: rx error: %d buffers missing\n",
2158 				 dev->name, num_buf);
2159 			DEV_STATS_INC(dev, rx_length_errors);
2160 			break;
2161 		}
2162 		u64_stats_add(&stats->bytes, len);
2163 		page = virt_to_head_page(buf);
2164 		put_page(page);
2165 	}
2166 }
2167 
2168 /* Why not use xdp_build_skb_from_frame()?
2169  * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
2170  * virtio-net there are two points that do not match its requirements:
2171  *  1. The size of the prefilled buffer is not fixed before xdp is set.
2172  *  2. xdp_build_skb_from_frame() does more checks that we don't need,
2173  *     like eth_type_trans() (which virtio-net does in receive_buf()).
2174  */
2175 static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
2176 					       struct virtnet_info *vi,
2177 					       struct xdp_buff *xdp,
2178 					       unsigned int xdp_frags_truesz)
2179 {
2180 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2181 	unsigned int headroom, data_len;
2182 	struct sk_buff *skb;
2183 	int metasize;
2184 	u8 nr_frags;
2185 
2186 	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
2187 		pr_debug("Error building skb as missing reserved tailroom for xdp\n");
2188 		return NULL;
2189 	}
2190 
2191 	if (unlikely(xdp_buff_has_frags(xdp)))
2192 		nr_frags = sinfo->nr_frags;
2193 
2194 	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
2195 	if (unlikely(!skb))
2196 		return NULL;
2197 
2198 	headroom = xdp->data - xdp->data_hard_start;
2199 	data_len = xdp->data_end - xdp->data;
2200 	skb_reserve(skb, headroom);
2201 	__skb_put(skb, data_len);
2202 
2203 	metasize = xdp->data - xdp->data_meta;
2204 	metasize = metasize > 0 ? metasize : 0;
2205 	if (metasize)
2206 		skb_metadata_set(skb, metasize);
2207 
2208 	if (unlikely(xdp_buff_has_frags(xdp)))
2209 		xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
2210 					  xdp_frags_truesz,
2211 					  xdp_buff_get_skb_flags(xdp));
2212 
2213 	return skb;
2214 }
2215 
2216 /* TODO: build xdp in big mode */
2217 static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
2218 				      struct virtnet_info *vi,
2219 				      struct receive_queue *rq,
2220 				      struct xdp_buff *xdp,
2221 				      void *buf,
2222 				      unsigned int len,
2223 				      unsigned int frame_sz,
2224 				      int *num_buf,
2225 				      unsigned int *xdp_frags_truesize,
2226 				      struct virtnet_rq_stats *stats)
2227 {
2228 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2229 	struct skb_shared_info *shinfo;
2230 	unsigned int xdp_frags_truesz = 0;
2231 	unsigned int truesize;
2232 	struct page *page;
2233 	skb_frag_t *frag;
2234 	int offset;
2235 	void *ctx;
2236 
2237 	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
2238 	xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM,
2239 			 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
2240 
2241 	if (!*num_buf)
2242 		return 0;
2243 
2244 	if (*num_buf > 1) {
2245 		/* If we want to build a multi-buffer xdp_buff, we need
2246 		 * to set the XDP_FLAGS_HAS_FRAGS bit in the flags of
2247 		 * the xdp_buff.
2248 		 */
2249 		if (!xdp_buff_has_frags(xdp))
2250 			xdp_buff_set_frags_flag(xdp);
2251 
2252 		shinfo = xdp_get_shared_info_from_buff(xdp);
2253 		shinfo->nr_frags = 0;
2254 		shinfo->xdp_frags_size = 0;
2255 	}
2256 
2257 	if (*num_buf > MAX_SKB_FRAGS + 1)
2258 		return -EINVAL;
2259 
2260 	while (--*num_buf > 0) {
2261 		buf = virtnet_rq_get_buf(rq, &len, &ctx);
2262 		if (unlikely(!buf)) {
2263 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
2264 				 dev->name, *num_buf,
2265 				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
2266 			DEV_STATS_INC(dev, rx_length_errors);
2267 			goto err;
2268 		}
2269 
2270 		u64_stats_add(&stats->bytes, len);
2271 		page = virt_to_head_page(buf);
2272 		offset = buf - page_address(page);
2273 
2274 		if (check_mergeable_len(dev, ctx, len)) {
2275 			put_page(page);
2276 			goto err;
2277 		}
2278 
2279 		truesize = mergeable_ctx_to_truesize(ctx);
2280 		xdp_frags_truesz += truesize;
2281 
2282 		frag = &shinfo->frags[shinfo->nr_frags++];
2283 		skb_frag_fill_page_desc(frag, page, offset, len);
2284 		if (page_is_pfmemalloc(page))
2285 			xdp_buff_set_frag_pfmemalloc(xdp);
2286 
2287 		shinfo->xdp_frags_size += len;
2288 	}
2289 
2290 	*xdp_frags_truesize = xdp_frags_truesz;
2291 	return 0;
2292 
2293 err:
2294 	put_xdp_frags(xdp);
2295 	return -EINVAL;
2296 }
2297 
2298 static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
2299 				   struct receive_queue *rq,
2300 				   struct bpf_prog *xdp_prog,
2301 				   void *ctx,
2302 				   unsigned int *frame_sz,
2303 				   int *num_buf,
2304 				   struct page **page,
2305 				   int offset,
2306 				   unsigned int *len,
2307 				   struct virtio_net_hdr_mrg_rxbuf *hdr)
2308 {
2309 	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
2310 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
2311 	struct page *xdp_page;
2312 	unsigned int xdp_room;
2313 
2314 	/* Transient failure which in theory could occur if
2315 	 * in-flight packets from before XDP was enabled reach
2316 	 * the receive path after XDP is loaded.
2317 	 */
2318 	if (unlikely(hdr->hdr.gso_type))
2319 		return NULL;
2320 
2321 	/* Partially checksummed packets must be dropped. */
2322 	if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
2323 		return NULL;
2324 
2325 	/* The XDP core assumes the frag size is PAGE_SIZE, but buffers
2326 	 * with headroom may add a hole to the truesize, which
2327 	 * makes their length exceed PAGE_SIZE. So we disable the
2328 	 * hole mechanism for xdp. See add_recvbuf_mergeable().
2329 	 */
2330 	*frame_sz = truesize;
2331 
2332 	if (likely(headroom >= virtnet_get_headroom(vi) &&
2333 		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
2334 		return page_address(*page) + offset;
2335 	}
2336 
2337 	/* This happens when there is not enough headroom because
2338 	 * the buffer was prefilled before XDP was set.
2339 	 * This should only happen for the first several packets.
2340 	 * In fact, vq reset can be used here to help us clean up
2341 	 * the prefilled buffers, but many existing devices do not
2342 	 * support it, and we don't want to bother users who are
2343 	 * using xdp normally.
2344 	 */
2345 	if (!xdp_prog->aux->xdp_has_frags) {
2346 		/* linearize data for XDP */
2347 		xdp_page = xdp_linearize_page(vi->dev, rq, num_buf,
2348 					      *page, offset,
2349 					      XDP_PACKET_HEADROOM,
2350 					      len);
2351 		if (!xdp_page)
2352 			return NULL;
2353 	} else {
2354 		xdp_room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
2355 					  sizeof(struct skb_shared_info));
2356 		if (*len + xdp_room > PAGE_SIZE)
2357 			return NULL;
2358 
2359 		xdp_page = alloc_page(GFP_ATOMIC);
2360 		if (!xdp_page)
2361 			return NULL;
2362 
2363 		memcpy(page_address(xdp_page) + XDP_PACKET_HEADROOM,
2364 		       page_address(*page) + offset, *len);
2365 	}
2366 
2367 	*frame_sz = PAGE_SIZE;
2368 
2369 	put_page(*page);
2370 
2371 	*page = xdp_page;
2372 
2373 	return page_address(*page) + XDP_PACKET_HEADROOM;
2374 }
2375 
2376 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
2377 					     struct virtnet_info *vi,
2378 					     struct receive_queue *rq,
2379 					     struct bpf_prog *xdp_prog,
2380 					     void *buf,
2381 					     void *ctx,
2382 					     unsigned int len,
2383 					     unsigned int *xdp_xmit,
2384 					     struct virtnet_rq_stats *stats)
2385 {
2386 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2387 	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
2388 	struct page *page = virt_to_head_page(buf);
2389 	int offset = buf - page_address(page);
2390 	unsigned int xdp_frags_truesz = 0;
2391 	struct sk_buff *head_skb;
2392 	unsigned int frame_sz;
2393 	struct xdp_buff xdp;
2394 	void *data;
2395 	u32 act;
2396 	int err;
2397 
2398 	data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
2399 				     offset, &len, hdr);
2400 	if (unlikely(!data))
2401 		goto err_xdp;
2402 
2403 	err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
2404 					 &num_buf, &xdp_frags_truesz, stats);
2405 	if (unlikely(err))
2406 		goto err_xdp;
2407 
2408 	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
2409 
2410 	switch (act) {
2411 	case XDP_PASS:
2412 		head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
2413 		if (unlikely(!head_skb))
2414 			break;
2415 		return head_skb;
2416 
2417 	case XDP_TX:
2418 	case XDP_REDIRECT:
2419 		return NULL;
2420 
2421 	default:
2422 		break;
2423 	}
2424 
2425 	put_xdp_frags(&xdp);
2426 
2427 err_xdp:
2428 	put_page(page);
2429 	mergeable_buf_free(rq, num_buf, dev, stats);
2430 
2431 	u64_stats_inc(&stats->xdp_drops);
2432 	u64_stats_inc(&stats->drops);
2433 	return NULL;
2434 }
2435 
2436 static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
2437 					       struct sk_buff *curr_skb,
2438 					       struct page *page, void *buf,
2439 					       int len, int truesize)
2440 {
2441 	int num_skb_frags;
2442 	int offset;
2443 
2444 	num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
2445 	if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
2446 		struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
2447 
2448 		if (unlikely(!nskb))
2449 			return NULL;
2450 
2451 		if (curr_skb == head_skb)
2452 			skb_shinfo(curr_skb)->frag_list = nskb;
2453 		else
2454 			curr_skb->next = nskb;
2455 		curr_skb = nskb;
2456 		head_skb->truesize += nskb->truesize;
2457 		num_skb_frags = 0;
2458 	}
2459 
2460 	if (curr_skb != head_skb) {
2461 		head_skb->data_len += len;
2462 		head_skb->len += len;
2463 		head_skb->truesize += truesize;
2464 	}
2465 
2466 	offset = buf - page_address(page);
2467 	if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
2468 		put_page(page);
2469 		skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
2470 				     len, truesize);
2471 	} else {
2472 		skb_add_rx_frag(curr_skb, num_skb_frags, page,
2473 				offset, len, truesize);
2474 	}
2475 
2476 	return curr_skb;
2477 }
2478 
2479 static struct sk_buff *receive_mergeable(struct net_device *dev,
2480 					 struct virtnet_info *vi,
2481 					 struct receive_queue *rq,
2482 					 void *buf,
2483 					 void *ctx,
2484 					 unsigned int len,
2485 					 unsigned int *xdp_xmit,
2486 					 struct virtnet_rq_stats *stats)
2487 {
2488 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2489 	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
2490 	struct page *page = virt_to_head_page(buf);
2491 	int offset = buf - page_address(page);
2492 	struct sk_buff *head_skb, *curr_skb;
2493 	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
2494 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
2495 
2496 	head_skb = NULL;
2497 	u64_stats_add(&stats->bytes, len - vi->hdr_len);
2498 
2499 	if (check_mergeable_len(dev, ctx, len))
2500 		goto err_skb;
2501 
2502 	if (unlikely(vi->xdp_enabled)) {
2503 		struct bpf_prog *xdp_prog;
2504 
2505 		rcu_read_lock();
2506 		xdp_prog = rcu_dereference(rq->xdp_prog);
2507 		if (xdp_prog) {
2508 			head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
2509 							 len, xdp_xmit, stats);
2510 			rcu_read_unlock();
2511 			return head_skb;
2512 		}
2513 		rcu_read_unlock();
2514 	}
2515 
2516 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
2517 	curr_skb = head_skb;
2518 
2519 	if (unlikely(!curr_skb))
2520 		goto err_skb;
2521 	while (--num_buf) {
2522 		buf = virtnet_rq_get_buf(rq, &len, &ctx);
2523 		if (unlikely(!buf)) {
2524 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
2525 				 dev->name, num_buf,
2526 				 virtio16_to_cpu(vi->vdev,
2527 						 hdr->num_buffers));
2528 			DEV_STATS_INC(dev, rx_length_errors);
2529 			goto err_buf;
2530 		}
2531 
2532 		u64_stats_add(&stats->bytes, len);
2533 		page = virt_to_head_page(buf);
2534 
2535 		if (check_mergeable_len(dev, ctx, len))
2536 			goto err_skb;
2537 
2538 		truesize = mergeable_ctx_to_truesize(ctx);
2539 		curr_skb  = virtnet_skb_append_frag(head_skb, curr_skb, page,
2540 						    buf, len, truesize);
2541 		if (!curr_skb)
2542 			goto err_skb;
2543 	}
2544 
2545 	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
2546 	return head_skb;
2547 
2548 err_skb:
2549 	put_page(page);
2550 	mergeable_buf_free(rq, num_buf, dev, stats);
2551 
2552 err_buf:
2553 	u64_stats_inc(&stats->drops);
2554 	dev_kfree_skb(head_skb);
2555 	return NULL;
2556 }
2557 
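/* The 32-bit hash value is carried as two little-endian 16-bit halves.
 * For example (values purely illustrative), hash_value_lo = 0x5678 and
 * hash_value_hi = 0x1234 reassemble to 0x12345678.
 */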
2558 static inline u32
2559 virtio_net_hash_value(const struct virtio_net_hdr_v1_hash *hdr_hash)
2560 {
2561 	return __le16_to_cpu(hdr_hash->hash_value_lo) |
2562 		(__le16_to_cpu(hdr_hash->hash_value_hi) << 16);
2563 }
2564 
2565 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
2566 				struct sk_buff *skb)
2567 {
2568 	enum pkt_hash_types rss_hash_type;
2569 
2570 	if (!hdr_hash || !skb)
2571 		return;
2572 
2573 	switch (__le16_to_cpu(hdr_hash->hash_report)) {
2574 	case VIRTIO_NET_HASH_REPORT_TCPv4:
2575 	case VIRTIO_NET_HASH_REPORT_UDPv4:
2576 	case VIRTIO_NET_HASH_REPORT_TCPv6:
2577 	case VIRTIO_NET_HASH_REPORT_UDPv6:
2578 	case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
2579 	case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
2580 		rss_hash_type = PKT_HASH_TYPE_L4;
2581 		break;
2582 	case VIRTIO_NET_HASH_REPORT_IPv4:
2583 	case VIRTIO_NET_HASH_REPORT_IPv6:
2584 	case VIRTIO_NET_HASH_REPORT_IPv6_EX:
2585 		rss_hash_type = PKT_HASH_TYPE_L3;
2586 		break;
2587 	case VIRTIO_NET_HASH_REPORT_NONE:
2588 	default:
2589 		rss_hash_type = PKT_HASH_TYPE_NONE;
2590 	}
2591 	skb_set_hash(skb, virtio_net_hash_value(hdr_hash), rss_hash_type);
2592 }
2593 
2594 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
2595 				 struct sk_buff *skb, u8 flags)
2596 {
2597 	struct virtio_net_common_hdr *hdr;
2598 	struct net_device *dev = vi->dev;
2599 
2600 	hdr = skb_vnet_common_hdr(skb);
2601 	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
2602 		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
2603 
2604 	hdr->hdr.flags = flags;
2605 	if (virtio_net_handle_csum_offload(skb, &hdr->hdr, vi->rx_tnl_csum)) {
2606 		net_warn_ratelimited("%s: bad csum: flags: %x, gso_type: %x rx_tnl_csum %d\n",
2607 				     dev->name, hdr->hdr.flags,
2608 				     hdr->hdr.gso_type, vi->rx_tnl_csum);
2609 		goto frame_err;
2610 	}
2611 
2612 	if (virtio_net_hdr_tnl_to_skb(skb, &hdr->tnl_hdr, vi->rx_tnl,
2613 				      vi->rx_tnl_csum,
2614 				      virtio_is_little_endian(vi->vdev))) {
2615 		net_warn_ratelimited("%s: bad gso: type: %x, size: %u, flags %x tunnel %d tnl csum %d\n",
2616 				     dev->name, hdr->hdr.gso_type,
2617 				     hdr->hdr.gso_size, hdr->hdr.flags,
2618 				     vi->rx_tnl, vi->rx_tnl_csum);
2619 		goto frame_err;
2620 	}
2621 
2622 	skb_record_rx_queue(skb, vq2rxq(rq->vq));
2623 	skb->protocol = eth_type_trans(skb, dev);
2624 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
2625 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
2626 
2627 	napi_gro_receive(&rq->napi, skb);
2628 	return;
2629 
2630 frame_err:
2631 	DEV_STATS_INC(dev, rx_frame_errors);
2632 	dev_kfree_skb(skb);
2633 }
2634 
2635 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
2636 			void *buf, unsigned int len, void **ctx,
2637 			unsigned int *xdp_xmit,
2638 			struct virtnet_rq_stats *stats)
2639 {
2640 	struct net_device *dev = vi->dev;
2641 	struct sk_buff *skb;
2642 	u8 flags;
2643 
2644 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
2645 		pr_debug("%s: short packet %i\n", dev->name, len);
2646 		DEV_STATS_INC(dev, rx_length_errors);
2647 		virtnet_rq_free_buf(vi, rq, buf);
2648 		return;
2649 	}
2650 
2651 	/* About the flags below:
2652 	 * 1. Save the flags early, as the XDP program might overwrite them.
2653 	 * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
2654 	 * stay valid after XDP processing.
2655 	 * 2. XDP doesn't work with partially checksummed packets (refer to
2656 	 * virtnet_xdp_set()), so packets marked as
2657 	 * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
2658 	 */
2659 
2660 	if (vi->mergeable_rx_bufs) {
2661 		flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
2662 		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
2663 					stats);
2664 	} else if (vi->big_packets) {
2665 		void *p = page_address((struct page *)buf);
2666 
2667 		flags = ((struct virtio_net_common_hdr *)p)->hdr.flags;
2668 		skb = receive_big(dev, vi, rq, buf, len, stats);
2669 	} else {
2670 		flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
2671 		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
2672 	}
2673 
2674 	if (unlikely(!skb))
2675 		return;
2676 
2677 	virtnet_receive_done(vi, rq, skb, flags);
2678 }
2679 
2680 /* Unlike mergeable buffers, all buffers are allocated with the
2681  * same size, except for the headroom. For this reason we do
2682  * not need to use mergeable_len_to_ctx() here - it is enough
2683  * to store the headroom as the context, ignoring the truesize.
2684  */
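/* Rough sketch of a small-mode receive buffer (sizes assumed, not to scale):
 *
 *   [ VIRTNET_RX_PAD | xdp_headroom | virtio-net hdr | packet data | skb_shared_info ]
 *
 * The address handed to the virtqueue points at the virtio-net header,
 * which is why receive_small() walks back over the padding and headroom.
 */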
2685 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
2686 			     gfp_t gfp)
2687 {
2688 	char *buf;
2689 	unsigned int xdp_headroom = virtnet_get_headroom(vi);
2690 	void *ctx = (void *)(unsigned long)xdp_headroom;
2691 	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
2692 	int err;
2693 
2694 	len = SKB_DATA_ALIGN(len) +
2695 	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2696 
2697 	if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp)))
2698 		return -ENOMEM;
2699 
2700 	buf = virtnet_rq_alloc(rq, len, gfp);
2701 	if (unlikely(!buf))
2702 		return -ENOMEM;
2703 
2704 	buf += VIRTNET_RX_PAD + xdp_headroom;
2705 
2706 	virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
2707 
2708 	err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
2709 	if (err < 0) {
2710 		virtnet_rq_unmap(rq, buf, 0);
2711 		put_page(virt_to_head_page(buf));
2712 	}
2713 
2714 	return err;
2715 }
2716 
2717 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
2718 			   gfp_t gfp)
2719 {
2720 	struct page *first, *list = NULL;
2721 	char *p;
2722 	int i, err, offset;
2723 
2724 	sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
2725 
2726 	/* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
2727 	for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
2728 		first = get_a_page(rq, gfp);
2729 		if (!first) {
2730 			if (list)
2731 				give_pages(rq, list);
2732 			return -ENOMEM;
2733 		}
2734 		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
2735 
2736 		/* chain new page in list head to match sg */
2737 		first->private = (unsigned long)list;
2738 		list = first;
2739 	}
2740 
2741 	first = get_a_page(rq, gfp);
2742 	if (!first) {
2743 		give_pages(rq, list);
2744 		return -ENOMEM;
2745 	}
2746 	p = page_address(first);
2747 
2748 	/* rq->sg[0], rq->sg[1] share the same page */
2749 	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
2750 	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
2751 
2752 	/* rq->sg[1] for data packet, from offset */
2753 	offset = sizeof(struct padded_vnet_hdr);
2754 	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
2755 
2756 	/* chain first in list head */
2757 	first->private = (unsigned long)list;
2758 	err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
2759 				  first, gfp);
2760 	if (err < 0)
2761 		give_pages(rq, first);
2762 
2763 	return err;
2764 }
2765 
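/* Worked example of the sizing below (numbers assumed): with hdr_len = 12,
 * an average packet length of ~1500 bytes within the clamp range and no XDP
 * room, len = ALIGN(12 + 1500, L1_CACHE_BYTES), i.e. 1536 bytes on a 64-byte
 * cache-line machine - far less than a full page. When XDP headroom/tailroom
 * is reserved (room != 0), the remaining PAGE_SIZE - room is used instead.
 */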
2766 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
2767 					  struct ewma_pkt_len *avg_pkt_len,
2768 					  unsigned int room)
2769 {
2770 	struct virtnet_info *vi = rq->vq->vdev->priv;
2771 	const size_t hdr_len = vi->hdr_len;
2772 	unsigned int len;
2773 
2774 	if (room)
2775 		return PAGE_SIZE - room;
2776 
2777 	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
2778 				rq->min_buf_len, PAGE_SIZE - hdr_len);
2779 
2780 	return ALIGN(len, L1_CACHE_BYTES);
2781 }
2782 
2783 static int add_recvbuf_mergeable(struct virtnet_info *vi,
2784 				 struct receive_queue *rq, gfp_t gfp)
2785 {
2786 	struct page_frag *alloc_frag = &rq->alloc_frag;
2787 	unsigned int headroom = virtnet_get_headroom(vi);
2788 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2789 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
2790 	unsigned int len, hole;
2791 	void *ctx;
2792 	char *buf;
2793 	int err;
2794 
2795 	/* Extra tailroom is needed to satisfy XDP's assumption. This
2796 	 * means rx frag coalescing won't work, but considering we've
2797 	 * disabled GSO for XDP, it won't be a big issue.
2798 	 */
2799 	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
2800 
2801 	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
2802 		return -ENOMEM;
2803 
2804 	if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size)
2805 		len -= sizeof(struct virtnet_rq_dma);
2806 
2807 	buf = virtnet_rq_alloc(rq, len + room, gfp);
2808 	if (unlikely(!buf))
2809 		return -ENOMEM;
2810 
2811 	buf += headroom; /* advance address leaving hole at front of pkt */
2812 	hole = alloc_frag->size - alloc_frag->offset;
2813 	if (hole < len + room) {
2814 		/* To avoid internal fragmentation, if there is very likely not
2815 		 * enough space for another buffer, add the remaining space to
2816 		 * the current buffer.
2817 		 * XDP core assumes that frame_size of xdp_buff and the length
2818 		 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
2819 		 */
2820 		if (!headroom)
2821 			len += hole;
2822 		alloc_frag->offset += hole;
2823 	}
2824 
2825 	virtnet_rq_init_one_sg(rq, buf, len);
2826 
2827 	ctx = mergeable_len_to_ctx(len + room, headroom);
2828 	err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
2829 	if (err < 0) {
2830 		virtnet_rq_unmap(rq, buf, 0);
2831 		put_page(virt_to_head_page(buf));
2832 	}
2833 
2834 	return err;
2835 }
2836 
2837 /*
2838  * Returns false if we couldn't fill entirely (OOM) and need to retry.
2839  * In XSK mode, this is the case when the receive buffer is not
2840  * allocated and xsk_use_need_wakeup is not set.
2841  *
2842  * Normally run in the receive path, but can also be run from ndo_open
2843  * before we're receiving packets, or from refill_work which is
2844  * careful to disable receiving (using napi_disable).
2845  */
2846 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
2847 			  gfp_t gfp)
2848 {
2849 	int err;
2850 
2851 	if (rq->xsk_pool) {
2852 		err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp);
2853 		goto kick;
2854 	}
2855 
2856 	do {
2857 		if (vi->mergeable_rx_bufs)
2858 			err = add_recvbuf_mergeable(vi, rq, gfp);
2859 		else if (vi->big_packets)
2860 			err = add_recvbuf_big(vi, rq, gfp);
2861 		else
2862 			err = add_recvbuf_small(vi, rq, gfp);
2863 
2864 		if (err)
2865 			break;
2866 	} while (rq->vq->num_free);
2867 
2868 kick:
2869 	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2870 		unsigned long flags;
2871 
2872 		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2873 		u64_stats_inc(&rq->stats.kicks);
2874 		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2875 	}
2876 
2877 	return err != -ENOMEM;
2878 }
2879 
2880 static void skb_recv_done(struct virtqueue *rvq)
2881 {
2882 	struct virtnet_info *vi = rvq->vdev->priv;
2883 	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2884 
2885 	rq->calls++;
2886 	virtqueue_napi_schedule(&rq->napi, rvq);
2887 }
2888 
2889 static void virtnet_napi_do_enable(struct virtqueue *vq,
2890 				   struct napi_struct *napi)
2891 {
2892 	napi_enable(napi);
2893 
2894 	/* If all buffers were filled by the other side before we enabled napi,
2895 	 * we won't get another interrupt, so process any outstanding packets now.
2896 	 * Call local_bh_enable afterwards to trigger softIRQ processing.
2897 	 */
2898 	local_bh_disable();
2899 	virtqueue_napi_schedule(napi, vq);
2900 	local_bh_enable();
2901 }
2902 
2903 static void virtnet_napi_enable(struct receive_queue *rq)
2904 {
2905 	struct virtnet_info *vi = rq->vq->vdev->priv;
2906 	int qidx = vq2rxq(rq->vq);
2907 
2908 	virtnet_napi_do_enable(rq->vq, &rq->napi);
2909 	netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
2910 }
2911 
2912 static void virtnet_napi_tx_enable(struct send_queue *sq)
2913 {
2914 	struct virtnet_info *vi = sq->vq->vdev->priv;
2915 	struct napi_struct *napi = &sq->napi;
2916 	int qidx = vq2txq(sq->vq);
2917 
2918 	if (!napi->weight)
2919 		return;
2920 
2921 	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2922 	 * enable the feature if this is likely affine with the transmit path.
2923 	 */
2924 	if (!vi->affinity_hint_set) {
2925 		napi->weight = 0;
2926 		return;
2927 	}
2928 
2929 	virtnet_napi_do_enable(sq->vq, napi);
2930 	netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
2931 }
2932 
2933 static void virtnet_napi_tx_disable(struct send_queue *sq)
2934 {
2935 	struct virtnet_info *vi = sq->vq->vdev->priv;
2936 	struct napi_struct *napi = &sq->napi;
2937 	int qidx = vq2txq(sq->vq);
2938 
2939 	if (napi->weight) {
2940 		netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
2941 		napi_disable(napi);
2942 	}
2943 }
2944 
2945 static void virtnet_napi_disable(struct receive_queue *rq)
2946 {
2947 	struct virtnet_info *vi = rq->vq->vdev->priv;
2948 	struct napi_struct *napi = &rq->napi;
2949 	int qidx = vq2rxq(rq->vq);
2950 
2951 	netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
2952 	napi_disable(napi);
2953 }
2954 
2955 static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
2956 				    struct receive_queue *rq,
2957 				    int budget,
2958 				    unsigned int *xdp_xmit,
2959 				    struct virtnet_rq_stats *stats)
2960 {
2961 	unsigned int len;
2962 	int packets = 0;
2963 	void *buf;
2964 
2965 	while (packets < budget) {
2966 		buf = virtqueue_get_buf(rq->vq, &len);
2967 		if (!buf)
2968 			break;
2969 
2970 		virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats);
2971 		packets++;
2972 	}
2973 
2974 	return packets;
2975 }
2976 
2977 static int virtnet_receive_packets(struct virtnet_info *vi,
2978 				   struct receive_queue *rq,
2979 				   int budget,
2980 				   unsigned int *xdp_xmit,
2981 				   struct virtnet_rq_stats *stats)
2982 {
2983 	unsigned int len;
2984 	int packets = 0;
2985 	void *buf;
2986 
2987 	if (!vi->big_packets || vi->mergeable_rx_bufs) {
2988 		void *ctx;
2989 		while (packets < budget &&
2990 		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2991 			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats);
2992 			packets++;
2993 		}
2994 	} else {
2995 		while (packets < budget &&
2996 		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
2997 			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
2998 			packets++;
2999 		}
3000 	}
3001 
3002 	return packets;
3003 }
3004 
3005 static int virtnet_receive(struct receive_queue *rq, int budget,
3006 			   unsigned int *xdp_xmit)
3007 {
3008 	struct virtnet_info *vi = rq->vq->vdev->priv;
3009 	struct virtnet_rq_stats stats = {};
3010 	int i, packets;
3011 
3012 	if (rq->xsk_pool)
3013 		packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats);
3014 	else
3015 		packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
3016 
3017 	u64_stats_set(&stats.packets, packets);
3018 	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
3019 		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
3020 			/* We need to retry refilling in the next NAPI poll so
3021 			 * we must return budget to make sure the NAPI is
3022 			 * repolled.
3023 			 */
3024 			packets = budget;
3025 	}
3026 
3027 	u64_stats_update_begin(&rq->stats.syncp);
3028 	for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
3029 		size_t offset = virtnet_rq_stats_desc[i].offset;
3030 		u64_stats_t *item, *src;
3031 
3032 		item = (u64_stats_t *)((u8 *)&rq->stats + offset);
3033 		src = (u64_stats_t *)((u8 *)&stats + offset);
3034 		u64_stats_add(item, u64_stats_read(src));
3035 	}
3036 
3037 	u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets));
3038 	u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes));
3039 
3040 	u64_stats_update_end(&rq->stats.syncp);
3041 
3042 	return packets;
3043 }
3044 
3045 static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
3046 {
3047 	struct virtnet_info *vi = rq->vq->vdev->priv;
3048 	unsigned int index = vq2rxq(rq->vq);
3049 	struct send_queue *sq = &vi->sq[index];
3050 	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
3051 
3052 	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
3053 		return;
3054 
3055 	if (__netif_tx_trylock(txq)) {
3056 		if (sq->reset) {
3057 			__netif_tx_unlock(txq);
3058 			return;
3059 		}
3060 
3061 		do {
3062 			virtqueue_disable_cb(sq->vq);
3063 			free_old_xmit(sq, txq, !!budget);
3064 		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
3065 
3066 		if (sq->vq->num_free >= MAX_SKB_FRAGS + 2)
3067 			virtnet_tx_wake_queue(vi, sq);
3068 
3069 		__netif_tx_unlock(txq);
3070 	}
3071 }
3072 
3073 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
3074 {
3075 	struct dim_sample cur_sample = {};
3076 
3077 	if (!rq->packets_in_napi)
3078 		return;
3079 
3080 	/* No protection is needed when fetching stats, since the fetcher and
3081 	 * updater of the stats run in the same context.
3082 	 */
3083 	dim_update_sample(rq->calls,
3084 			  u64_stats_read(&rq->stats.packets),
3085 			  u64_stats_read(&rq->stats.bytes),
3086 			  &cur_sample);
3087 
3088 	net_dim(&rq->dim, &cur_sample);
3089 	rq->packets_in_napi = 0;
3090 }
3091 
3092 static int virtnet_poll(struct napi_struct *napi, int budget)
3093 {
3094 	struct receive_queue *rq =
3095 		container_of(napi, struct receive_queue, napi);
3096 	struct virtnet_info *vi = rq->vq->vdev->priv;
3097 	struct send_queue *sq;
3098 	unsigned int received;
3099 	unsigned int xdp_xmit = 0;
3100 	bool napi_complete;
3101 
3102 	virtnet_poll_cleantx(rq, budget);
3103 
3104 	received = virtnet_receive(rq, budget, &xdp_xmit);
3105 	rq->packets_in_napi += received;
3106 
3107 	if (xdp_xmit & VIRTIO_XDP_REDIR)
3108 		xdp_do_flush();
3109 
3110 	/* Out of packets? */
3111 	if (received < budget) {
3112 		napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
3113 		/* Intentionally not taking dim_lock here. This may result in a
3114 		 * spurious net_dim call. But if that happens virtnet_rx_dim_work
3115 		 * will not act on the scheduled work.
3116 		 */
3117 		if (napi_complete && rq->dim_enabled)
3118 			virtnet_rx_dim_update(vi, rq);
3119 	}
3120 
3121 	if (xdp_xmit & VIRTIO_XDP_TX) {
3122 		sq = virtnet_xdp_get_sq(vi);
3123 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
3124 			u64_stats_update_begin(&sq->stats.syncp);
3125 			u64_stats_inc(&sq->stats.kicks);
3126 			u64_stats_update_end(&sq->stats.syncp);
3127 		}
3128 		virtnet_xdp_put_sq(vi, sq);
3129 	}
3130 
3131 	return received;
3132 }
3133 
3134 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
3135 {
3136 	virtnet_napi_tx_disable(&vi->sq[qp_index]);
3137 	virtnet_napi_disable(&vi->rq[qp_index]);
3138 	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
3139 }
3140 
3141 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
3142 {
3143 	struct net_device *dev = vi->dev;
3144 	int err;
3145 
3146 	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
3147 			       vi->rq[qp_index].napi.napi_id);
3148 	if (err < 0)
3149 		return err;
3150 
3151 	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
3152 					 MEM_TYPE_PAGE_SHARED, NULL);
3153 	if (err < 0)
3154 		goto err_xdp_reg_mem_model;
3155 
3156 	virtnet_napi_enable(&vi->rq[qp_index]);
3157 	virtnet_napi_tx_enable(&vi->sq[qp_index]);
3158 
3159 	return 0;
3160 
3161 err_xdp_reg_mem_model:
3162 	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
3163 	return err;
3164 }
3165 
3166 static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim)
3167 {
3168 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3169 		return;
3170 	net_dim_work_cancel(dim);
3171 }
3172 
3173 static void virtnet_update_settings(struct virtnet_info *vi)
3174 {
3175 	u32 speed;
3176 	u8 duplex;
3177 
3178 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3179 		return;
3180 
3181 	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3182 
3183 	if (ethtool_validate_speed(speed))
3184 		vi->speed = speed;
3185 
3186 	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3187 
3188 	if (ethtool_validate_duplex(duplex))
3189 		vi->duplex = duplex;
3190 }
3191 
3192 static int virtnet_open(struct net_device *dev)
3193 {
3194 	struct virtnet_info *vi = netdev_priv(dev);
3195 	int i, err;
3196 
3197 	for (i = 0; i < vi->max_queue_pairs; i++) {
3198 		if (i < vi->curr_queue_pairs)
3199 			/* Pre-fill rq aggressively, to make sure we are ready to
3200 			 * get packets immediately.
3201 			 */
3202 			try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
3203 
3204 		err = virtnet_enable_queue_pair(vi, i);
3205 		if (err < 0)
3206 			goto err_enable_qp;
3207 	}
3208 
3209 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3210 		if (vi->status & VIRTIO_NET_S_LINK_UP)
3211 			netif_carrier_on(vi->dev);
3212 		virtio_config_driver_enable(vi->vdev);
3213 	} else {
3214 		vi->status = VIRTIO_NET_S_LINK_UP;
3215 		netif_carrier_on(dev);
3216 	}
3217 
3218 	return 0;
3219 
3220 err_enable_qp:
3221 	for (i--; i >= 0; i--) {
3222 		virtnet_disable_queue_pair(vi, i);
3223 		virtnet_cancel_dim(vi, &vi->rq[i].dim);
3224 	}
3225 
3226 	return err;
3227 }
3228 
3229 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
3230 {
3231 	struct send_queue *sq = container_of(napi, struct send_queue, napi);
3232 	struct virtnet_info *vi = sq->vq->vdev->priv;
3233 	unsigned int index = vq2txq(sq->vq);
3234 	struct netdev_queue *txq;
3235 	int opaque, xsk_done = 0;
3236 	bool done;
3237 
3238 	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
3239 		/* We don't need to enable cb for XDP */
3240 		napi_complete_done(napi, 0);
3241 		return 0;
3242 	}
3243 
3244 	txq = netdev_get_tx_queue(vi->dev, index);
3245 	__netif_tx_lock(txq, raw_smp_processor_id());
3246 	virtqueue_disable_cb(sq->vq);
3247 
3248 	if (sq->xsk_pool)
3249 		xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget);
3250 	else
3251 		free_old_xmit(sq, txq, !!budget);
3252 
3253 	if (sq->vq->num_free >= MAX_SKB_FRAGS + 2)
3254 		virtnet_tx_wake_queue(vi, sq);
3255 
3256 	if (xsk_done >= budget) {
3257 		__netif_tx_unlock(txq);
3258 		return budget;
3259 	}
3260 
3261 	opaque = virtqueue_enable_cb_prepare(sq->vq);
3262 
3263 	done = napi_complete_done(napi, 0);
3264 
3265 	if (!done)
3266 		virtqueue_disable_cb(sq->vq);
3267 
3268 	__netif_tx_unlock(txq);
3269 
3270 	if (done) {
3271 		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
3272 			if (napi_schedule_prep(napi)) {
3273 				__netif_tx_lock(txq, raw_smp_processor_id());
3274 				virtqueue_disable_cb(sq->vq);
3275 				__netif_tx_unlock(txq);
3276 				__napi_schedule(napi);
3277 			}
3278 		}
3279 	}
3280 
3281 	return 0;
3282 }
3283 
3284 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
3285 {
3286 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
3287 	struct virtnet_info *vi = sq->vq->vdev->priv;
3288 	struct virtio_net_hdr_v1_hash_tunnel *hdr;
3289 	int num_sg;
3290 	unsigned hdr_len = vi->hdr_len;
3291 	bool can_push;
3292 
3293 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
3294 
3295 	/* Make sure it's safe to cast between formats */
3296 	BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr));
3297 	BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr.hdr));
3298 
3299 	can_push = vi->any_header_sg &&
3300 		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
3301 		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
3302 	/* Even if we can, don't push here yet as this would skew
3303 	 * csum_start offset below. */
3304 	if (can_push)
3305 		hdr = (struct virtio_net_hdr_v1_hash_tunnel *)(skb->data -
3306 							       hdr_len);
3307 	else
3308 		hdr = &skb_vnet_common_hdr(skb)->tnl_hdr;
3309 
3310 	if (virtio_net_hdr_tnl_from_skb(skb, hdr, vi->tx_tnl,
3311 					virtio_is_little_endian(vi->vdev), 0,
3312 					false))
3313 		return -EPROTO;
3314 
3315 	if (vi->mergeable_rx_bufs)
3316 		hdr->hash_hdr.hdr.num_buffers = 0;
3317 
3318 	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
3319 	if (can_push) {
3320 		__skb_push(skb, hdr_len);
3321 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
3322 		if (unlikely(num_sg < 0))
3323 			return num_sg;
3324 		/* Pull header back to avoid skew in tx bytes calculations. */
3325 		__skb_pull(skb, hdr_len);
3326 	} else {
3327 		sg_set_buf(sq->sg, hdr, hdr_len);
3328 		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
3329 		if (unlikely(num_sg < 0))
3330 			return num_sg;
3331 		num_sg++;
3332 	}
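	/* Resulting scatterlist (illustrative): with can_push, the header is
	 * pushed in front of skb->data and sq->sg covers the linear area plus
	 * frags; otherwise sg[0] holds the out-of-line header and sg[1..] the
	 * skb data.
	 */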
3333 
3334 	return virtnet_add_outbuf(sq, num_sg, skb,
3335 				  orphan ? VIRTNET_XMIT_TYPE_SKB_ORPHAN : VIRTNET_XMIT_TYPE_SKB);
3336 }
3337 
3338 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
3339 {
3340 	struct virtnet_info *vi = netdev_priv(dev);
3341 	int qnum = skb_get_queue_mapping(skb);
3342 	struct send_queue *sq = &vi->sq[qnum];
3343 	int err;
3344 	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
3345 	bool xmit_more = netdev_xmit_more();
3346 	bool use_napi = sq->napi.weight;
3347 	bool kick;
3348 
3349 	if (!use_napi)
3350 		free_old_xmit(sq, txq, false);
3351 	else
3352 		virtqueue_disable_cb(sq->vq);
3353 
3354 	/* timestamp packet in software */
3355 	skb_tx_timestamp(skb);
3356 
3357 	/* Try to transmit */
3358 	err = xmit_skb(sq, skb, !use_napi);
3359 
3360 	/* This should not happen! */
3361 	if (unlikely(err)) {
3362 		DEV_STATS_INC(dev, tx_fifo_errors);
3363 		if (net_ratelimit())
3364 			dev_warn(&dev->dev,
3365 				 "Unexpected TXQ (%d) queue failure: %d\n",
3366 				 qnum, err);
3367 		DEV_STATS_INC(dev, tx_dropped);
3368 		dev_kfree_skb_any(skb);
3369 		return NETDEV_TX_OK;
3370 	}
3371 
3372 	/* Don't wait up for transmitted skbs to be freed. */
3373 	if (!use_napi) {
3374 		skb_orphan(skb);
3375 		nf_reset_ct(skb);
3376 	}
3377 
3378 	if (use_napi)
3379 		tx_may_stop(vi, dev, sq);
3380 	else
3381 		check_sq_full_and_disable(vi, dev, sq);
3382 
3383 	kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
3384 			  !xmit_more || netif_xmit_stopped(txq);
3385 	if (kick) {
3386 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
3387 			u64_stats_update_begin(&sq->stats.syncp);
3388 			u64_stats_inc(&sq->stats.kicks);
3389 			u64_stats_update_end(&sq->stats.syncp);
3390 		}
3391 	}
3392 
3393 	if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
3394 		virtqueue_napi_schedule(&sq->napi, sq->vq);
3395 
3396 	return NETDEV_TX_OK;
3397 }
3398 
3399 static void virtnet_rx_pause(struct virtnet_info *vi,
3400 			     struct receive_queue *rq)
3401 {
3402 	bool running = netif_running(vi->dev);
3403 
3404 	if (running) {
3405 		virtnet_napi_disable(rq);
3406 		virtnet_cancel_dim(vi, &rq->dim);
3407 	}
3408 }
3409 
3410 static void virtnet_rx_pause_all(struct virtnet_info *vi)
3411 {
3412 	int i;
3413 
3414 	for (i = 0; i < vi->max_queue_pairs; i++)
3415 		virtnet_rx_pause(vi, &vi->rq[i]);
3416 }
3417 
3418 static void virtnet_rx_resume(struct virtnet_info *vi,
3419 			      struct receive_queue *rq,
3420 			      bool refill)
3421 {
3422 	if (netif_running(vi->dev)) {
3423 		/* Pre-fill rq aggressively, to make sure we are ready to get
3424 		 * packets immediately.
3425 		 */
3426 		if (refill)
3427 			try_fill_recv(vi, rq, GFP_KERNEL);
3428 
3429 		virtnet_napi_enable(rq);
3430 	}
3431 }
3432 
3433 static void virtnet_rx_resume_all(struct virtnet_info *vi)
3434 {
3435 	int i;
3436 
3437 	for (i = 0; i < vi->max_queue_pairs; i++) {
3438 		if (i < vi->curr_queue_pairs)
3439 			virtnet_rx_resume(vi, &vi->rq[i], true);
3440 		else
3441 			virtnet_rx_resume(vi, &vi->rq[i], false);
3442 	}
3443 }
3444 
3445 static int virtnet_rx_resize(struct virtnet_info *vi,
3446 			     struct receive_queue *rq, u32 ring_num)
3447 {
3448 	int err, qindex;
3449 
3450 	qindex = rq - vi->rq;
3451 
3452 	virtnet_rx_pause(vi, rq);
3453 
3454 	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL);
3455 	if (err)
3456 		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
3457 
3458 	virtnet_rx_resume(vi, rq, true);
3459 	return err;
3460 }
3461 
3462 static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
3463 {
3464 	bool running = netif_running(vi->dev);
3465 	struct netdev_queue *txq;
3466 	int qindex;
3467 
3468 	qindex = sq - vi->sq;
3469 
3470 	if (running)
3471 		virtnet_napi_tx_disable(sq);
3472 
3473 	txq = netdev_get_tx_queue(vi->dev, qindex);
3474 
3475 	/* 1. wait for all in-flight xmits to complete
3476 	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
3477 	 */
3478 	__netif_tx_lock_bh(txq);
3479 
3480 	/* Prevent rx poll from accessing sq. */
3481 	sq->reset = true;
3482 
3483 	/* Prevent the upper layer from trying to send packets. */
3484 	netif_stop_subqueue(vi->dev, qindex);
3485 	u64_stats_update_begin(&sq->stats.syncp);
3486 	u64_stats_inc(&sq->stats.stop);
3487 	u64_stats_update_end(&sq->stats.syncp);
3488 
3489 	__netif_tx_unlock_bh(txq);
3490 }
3491 
3492 static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
3493 {
3494 	bool running = netif_running(vi->dev);
3495 	struct netdev_queue *txq;
3496 	int qindex;
3497 
3498 	qindex = sq - vi->sq;
3499 
3500 	txq = netdev_get_tx_queue(vi->dev, qindex);
3501 
3502 	__netif_tx_lock_bh(txq);
3503 	sq->reset = false;
3504 	virtnet_tx_wake_queue(vi, sq);
3505 	__netif_tx_unlock_bh(txq);
3506 
3507 	if (running)
3508 		virtnet_napi_tx_enable(sq);
3509 }
3510 
3511 static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
3512 			     u32 ring_num)
3513 {
3514 	int qindex, err;
3515 
3516 	if (ring_num <= MAX_SKB_FRAGS + 2) {
3517 		netdev_err(vi->dev, "tx size (%d) cannot be smaller than %d\n",
3518 			   ring_num, MAX_SKB_FRAGS + 2);
3519 		return -EINVAL;
3520 	}
3521 
3522 	qindex = sq - vi->sq;
3523 
3524 	virtnet_tx_pause(vi, sq);
3525 
3526 	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf,
3527 			       virtnet_sq_free_unused_buf_done);
3528 	if (err)
3529 		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
3530 
3531 	virtnet_tx_resume(vi, sq);
3532 
3533 	return err;
3534 }
3535 
3536 /*
3537  * Send command via the control virtqueue and check status.  Commands
3538  * supported by the hypervisor, as indicated by feature bits, should
3539  * never fail unless improperly formatted.
3540  */
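/* Buffer layout assembled below (illustrative; out/in are optional):
 *
 *   sgs[0]    = command header  (device-readable)
 *   sgs[1]    = out             (device-readable, if the caller supplied one)
 *   sgs[next] = status byte     (device-writable)
 *   sgs[last] = in              (device-writable, if the caller supplied one)
 */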
3541 static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd,
3542 				       struct scatterlist *out,
3543 				       struct scatterlist *in)
3544 {
3545 	struct scatterlist *sgs[5], hdr, stat;
3546 	u32 out_num = 0, tmp, in_num = 0;
3547 	bool ok;
3548 	int ret;
3549 
3550 	/* Caller should know better */
3551 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
3552 
3553 	mutex_lock(&vi->cvq_lock);
3554 	vi->ctrl->status = ~0;
3555 	vi->ctrl->hdr.class = class;
3556 	vi->ctrl->hdr.cmd = cmd;
3557 	/* Add header */
3558 	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
3559 	sgs[out_num++] = &hdr;
3560 
3561 	if (out)
3562 		sgs[out_num++] = out;
3563 
3564 	/* Add return status. */
3565 	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
3566 	sgs[out_num + in_num++] = &stat;
3567 
3568 	if (in)
3569 		sgs[out_num + in_num++] = in;
3570 
3571 	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
3572 	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
3573 	if (ret < 0) {
3574 		dev_warn(&vi->vdev->dev,
3575 			 "Failed to add sgs for command vq: %d\n.", ret);
3576 		mutex_unlock(&vi->cvq_lock);
3577 		return false;
3578 	}
3579 
3580 	if (unlikely(!virtqueue_kick(vi->cvq)))
3581 		goto unlock;
3582 
3583 	/* Spin for a response; the kick causes an ioport write, trapping
3584 	 * into the hypervisor, so the request should be handled immediately.
3585 	 */
3586 	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
3587 	       !virtqueue_is_broken(vi->cvq)) {
3588 		cond_resched();
3589 		cpu_relax();
3590 	}
3591 
3592 unlock:
3593 	ok = vi->ctrl->status == VIRTIO_NET_OK;
3594 	mutex_unlock(&vi->cvq_lock);
3595 	return ok;
3596 }
3597 
3598 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
3599 				 struct scatterlist *out)
3600 {
3601 	return virtnet_send_command_reply(vi, class, cmd, out, NULL);
3602 }
3603 
3604 static int virtnet_set_mac_address(struct net_device *dev, void *p)
3605 {
3606 	struct virtnet_info *vi = netdev_priv(dev);
3607 	struct virtio_device *vdev = vi->vdev;
3608 	int ret;
3609 	struct sockaddr *addr;
3610 	struct scatterlist sg;
3611 
3612 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
3613 		return -EOPNOTSUPP;
3614 
3615 	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
3616 	if (!addr)
3617 		return -ENOMEM;
3618 
3619 	ret = eth_prepare_mac_addr_change(dev, addr);
3620 	if (ret)
3621 		goto out;
3622 
3623 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
3624 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
3625 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
3626 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
3627 			dev_warn(&vdev->dev,
3628 				 "Failed to set mac address by vq command.\n");
3629 			ret = -EINVAL;
3630 			goto out;
3631 		}
3632 	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
3633 		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3634 		unsigned int i;
3635 
3636 		/* Naturally, this has an atomicity problem. */
3637 		for (i = 0; i < dev->addr_len; i++)
3638 			virtio_cwrite8(vdev,
3639 				       offsetof(struct virtio_net_config, mac) +
3640 				       i, addr->sa_data[i]);
3641 	}
3642 
3643 	eth_commit_mac_addr_change(dev, p);
3644 	ret = 0;
3645 
3646 out:
3647 	kfree(addr);
3648 	return ret;
3649 }
3650 
3651 static void virtnet_stats(struct net_device *dev,
3652 			  struct rtnl_link_stats64 *tot)
3653 {
3654 	struct virtnet_info *vi = netdev_priv(dev);
3655 	unsigned int start;
3656 	int i;
3657 
3658 	for (i = 0; i < vi->max_queue_pairs; i++) {
3659 		u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
3660 		struct receive_queue *rq = &vi->rq[i];
3661 		struct send_queue *sq = &vi->sq[i];
3662 
3663 		do {
3664 			start = u64_stats_fetch_begin(&sq->stats.syncp);
3665 			tpackets = u64_stats_read(&sq->stats.packets);
3666 			tbytes   = u64_stats_read(&sq->stats.bytes);
3667 			terrors  = u64_stats_read(&sq->stats.tx_timeouts);
3668 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3669 
3670 		do {
3671 			start = u64_stats_fetch_begin(&rq->stats.syncp);
3672 			rpackets = u64_stats_read(&rq->stats.packets);
3673 			rbytes   = u64_stats_read(&rq->stats.bytes);
3674 			rdrops   = u64_stats_read(&rq->stats.drops);
3675 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3676 
3677 		tot->rx_packets += rpackets;
3678 		tot->tx_packets += tpackets;
3679 		tot->rx_bytes   += rbytes;
3680 		tot->tx_bytes   += tbytes;
3681 		tot->rx_dropped += rdrops;
3682 		tot->tx_errors  += terrors;
3683 	}
3684 
3685 	tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
3686 	tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
3687 	tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
3688 	tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
3689 }
3690 
3691 static void virtnet_ack_link_announce(struct virtnet_info *vi)
3692 {
3693 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
3694 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
3695 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
3696 }
3697 
3698 static bool virtnet_commit_rss_command(struct virtnet_info *vi);
3699 
3700 static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs)
3701 {
3702 	u32 indir_val = 0;
3703 	int i = 0;
3704 
3705 	for (; i < vi->rss_indir_table_size; ++i) {
3706 		indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
3707 		vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val);
3708 	}
3709 	vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs);
3710 }
3711 
3712 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
3713 {
3714 	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
3715 	struct virtio_net_rss_config_hdr *old_rss_hdr;
3716 	struct virtio_net_rss_config_trailer old_rss_trailer;
3717 	struct net_device *dev = vi->dev;
3718 	struct scatterlist sg;
3719 
3720 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
3721 		return 0;
3722 
3723 	/* First check whether we need to update RSS. Do the update only if both
3724 	 * (1) RSS is enabled and (2) there is no user configuration.
3725 	 *
3726 	 * While processing the RSS command, the device updates queue_pairs from rss.max_tx_vq.
3727 	 * That is, the device updates queue_pairs together with RSS, so we can skip the separate
3728 	 * queue_pairs update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
3729 	 */
3730 	if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
3731 		old_rss_hdr = vi->rss_hdr;
3732 		old_rss_trailer = vi->rss_trailer;
3733 		vi->rss_hdr = devm_kzalloc(&vi->vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
3734 		if (!vi->rss_hdr) {
3735 			vi->rss_hdr = old_rss_hdr;
3736 			return -ENOMEM;
3737 		}
3738 
3739 		*vi->rss_hdr = *old_rss_hdr;
3740 		virtnet_rss_update_by_qpairs(vi, queue_pairs);
3741 
3742 		if (!virtnet_commit_rss_command(vi)) {
3743 			/* restore the old RSS state if commit_rss_command failed */
3744 			devm_kfree(&vi->vdev->dev, vi->rss_hdr);
3745 			vi->rss_hdr = old_rss_hdr;
3746 			vi->rss_trailer = old_rss_trailer;
3747 
3748 			dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d because committing RSS failed\n",
3749 				 queue_pairs);
3750 			return -EINVAL;
3751 		}
3752 		devm_kfree(&vi->vdev->dev, old_rss_hdr);
3753 		goto succ;
3754 	}
3755 
3756 	mq = kzalloc_obj(*mq);
3757 	if (!mq)
3758 		return -ENOMEM;
3759 
3760 	mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
3761 	sg_init_one(&sg, mq, sizeof(*mq));
3762 
3763 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3764 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
3765 		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
3766 			 queue_pairs);
3767 		return -EINVAL;
3768 	}
3769 succ:
3770 	vi->curr_queue_pairs = queue_pairs;
3771 	if (dev->flags & IFF_UP) {
3772 		local_bh_disable();
3773 		for (int i = 0; i < vi->curr_queue_pairs; ++i)
3774 			virtqueue_napi_schedule(&vi->rq[i].napi, vi->rq[i].vq);
3775 		local_bh_enable();
3776 	}
3777 
3778 	return 0;
3779 }
3780 
3781 static int virtnet_close(struct net_device *dev)
3782 {
3783 	struct virtnet_info *vi = netdev_priv(dev);
3784 	int i;
3785 
3786 	/* Prevent the config change callback from changing carrier
3787 	 * after close
3788 	 */
3789 	virtio_config_driver_disable(vi->vdev);
3790 	/* Stop getting status/speed updates: we don't care until next
3791 	 * open
3792 	 */
3793 	cancel_work_sync(&vi->config_work);
3794 
3795 	for (i = 0; i < vi->max_queue_pairs; i++) {
3796 		virtnet_disable_queue_pair(vi, i);
3797 		virtnet_cancel_dim(vi, &vi->rq[i].dim);
3798 	}
3799 
3800 	netif_carrier_off(dev);
3801 
3802 	return 0;
3803 }
3804 
3805 static void virtnet_rx_mode_work(struct work_struct *work)
3806 {
3807 	struct virtnet_info *vi =
3808 		container_of(work, struct virtnet_info, rx_mode_work);
3809 	u8 *promisc_allmulti  __free(kfree) = NULL;
3810 	struct net_device *dev = vi->dev;
3811 	struct scatterlist sg[2];
3812 	struct virtio_net_ctrl_mac *mac_data;
3813 	struct netdev_hw_addr *ha;
3814 	int uc_count;
3815 	int mc_count;
3816 	void *buf;
3817 	int i;
3818 
3819 	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
3820 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
3821 		return;
3822 
3823 	promisc_allmulti = kzalloc_obj(*promisc_allmulti);
3824 	if (!promisc_allmulti) {
3825 		dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
3826 		return;
3827 	}
3828 
3829 	rtnl_lock();
3830 
3831 	*promisc_allmulti = !!(dev->flags & IFF_PROMISC);
3832 	sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
3833 
3834 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
3835 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
3836 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
3837 			 *promisc_allmulti ? "en" : "dis");
3838 
3839 	*promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
3840 	sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
3841 
3842 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
3843 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
3844 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
3845 			 *promisc_allmulti ? "en" : "dis");
3846 
3847 	netif_addr_lock_bh(dev);
3848 
3849 	uc_count = netdev_uc_count(dev);
3850 	mc_count = netdev_mc_count(dev);
3851 	/* MAC filter - use one buffer for both lists */
3852 	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
3853 		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
3854 	mac_data = buf;
3855 	if (!buf) {
3856 		netif_addr_unlock_bh(dev);
3857 		rtnl_unlock();
3858 		return;
3859 	}
3860 
3861 	sg_init_table(sg, 2);
3862 
3863 	/* Store the unicast list and count in the front of the buffer */
3864 	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
3865 	i = 0;
3866 	netdev_for_each_uc_addr(ha, dev)
3867 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
3868 
3869 	sg_set_buf(&sg[0], mac_data,
3870 		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
3871 
3872 	/* multicast list and count fill the end */
3873 	mac_data = (void *)&mac_data->macs[uc_count][0];
3874 
3875 	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
3876 	i = 0;
3877 	netdev_for_each_mc_addr(ha, dev)
3878 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
3879 
3880 	netif_addr_unlock_bh(dev);
3881 
3882 	sg_set_buf(&sg[1], mac_data,
3883 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
3884 
3885 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
3886 				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
3887 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
3888 
3889 	rtnl_unlock();
3890 
3891 	kfree(buf);
3892 }
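
/* Layout of the single buffer built above (derived from the code; the actual size
 * depends on the address counts):
 *
 *   [uc entries][uc_count * ETH_ALEN MACs][mc entries][mc_count * ETH_ALEN MACs]
 *
 * sg[0] covers the unicast half and sg[1] the multicast half, so a single
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command carries both filter tables.
 */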
3893 
3894 static void virtnet_set_rx_mode(struct net_device *dev)
3895 {
3896 	struct virtnet_info *vi = netdev_priv(dev);
3897 
3898 	if (vi->rx_mode_work_enabled)
3899 		schedule_work(&vi->rx_mode_work);
3900 }
3901 
3902 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
3903 				   __be16 proto, u16 vid)
3904 {
3905 	struct virtnet_info *vi = netdev_priv(dev);
3906 	__virtio16 *_vid __free(kfree) = NULL;
3907 	struct scatterlist sg;
3908 
3909 	_vid = kzalloc_obj(*_vid);
3910 	if (!_vid)
3911 		return -ENOMEM;
3912 
3913 	*_vid = cpu_to_virtio16(vi->vdev, vid);
3914 	sg_init_one(&sg, _vid, sizeof(*_vid));
3915 
3916 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3917 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
3918 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
3919 	return 0;
3920 }
3921 
3922 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
3923 				    __be16 proto, u16 vid)
3924 {
3925 	struct virtnet_info *vi = netdev_priv(dev);
3926 	__virtio16 *_vid __free(kfree) = NULL;
3927 	struct scatterlist sg;
3928 
3929 	_vid = kzalloc_obj(*_vid);
3930 	if (!_vid)
3931 		return -ENOMEM;
3932 
3933 	*_vid = cpu_to_virtio16(vi->vdev, vid);
3934 	sg_init_one(&sg, _vid, sizeof(*_vid));
3935 
3936 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3937 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
3938 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
3939 	return 0;
3940 }
3941 
3942 static void virtnet_clean_affinity(struct virtnet_info *vi)
3943 {
3944 	int i;
3945 
3946 	if (vi->affinity_hint_set) {
3947 		for (i = 0; i < vi->max_queue_pairs; i++) {
3948 			virtqueue_set_affinity(vi->rq[i].vq, NULL);
3949 			virtqueue_set_affinity(vi->sq[i].vq, NULL);
3950 		}
3951 
3952 		vi->affinity_hint_set = false;
3953 	}
3954 }
3955 
3956 static void virtnet_set_affinity(struct virtnet_info *vi)
3957 {
3958 	cpumask_var_t mask;
3959 	int stragglers;
3960 	int group_size;
3961 	int i, start = 0, cpu;
3962 	int num_cpu;
3963 	int stride;
3964 
3965 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3966 		virtnet_clean_affinity(vi);
3967 		return;
3968 	}
3969 
3970 	num_cpu = num_online_cpus();
3971 	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
3972 	stragglers = num_cpu >= vi->curr_queue_pairs ?
3973 			num_cpu % vi->curr_queue_pairs :
3974 			0;
3975 
3976 	for (i = 0; i < vi->curr_queue_pairs; i++) {
3977 		group_size = stride + (i < stragglers ? 1 : 0);
3978 
3979 		for_each_online_cpu_wrap(cpu, start) {
3980 			if (!group_size--) {
3981 				start = cpu;
3982 				break;
3983 			}
3984 			cpumask_set_cpu(cpu, mask);
3985 		}
3986 
3987 		virtqueue_set_affinity(vi->rq[i].vq, mask);
3988 		virtqueue_set_affinity(vi->sq[i].vq, mask);
3989 		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
3990 		cpumask_clear(mask);
3991 	}
3992 
3993 	vi->affinity_hint_set = true;
3994 	free_cpumask_var(mask);
3995 }
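
/* Worked example: with 8 online CPUs and 3 queue pairs, stride = 8 / 3 = 2 and
 * stragglers = 8 % 3 = 2, so queue pairs 0 and 1 each get 3 CPUs and queue pair 2
 * gets the remaining 2. The same mask is also programmed as the XPS map above.
 */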
3996 
3997 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
3998 {
3999 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
4000 						   node);
4001 	virtnet_set_affinity(vi);
4002 	return 0;
4003 }
4004 
4005 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
4006 {
4007 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
4008 						   node_dead);
4009 	virtnet_set_affinity(vi);
4010 	return 0;
4011 }
4012 
4013 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
4014 {
4015 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
4016 						   node);
4017 
4018 	virtnet_clean_affinity(vi);
4019 	return 0;
4020 }
4021 
4022 static enum cpuhp_state virtionet_online;
4023 
4024 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
4025 {
4026 	int ret;
4027 
4028 	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
4029 	if (ret)
4030 		return ret;
4031 	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
4032 					       &vi->node_dead);
4033 	if (!ret)
4034 		return ret;
4035 	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
4036 	return ret;
4037 }
4038 
4039 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
4040 {
4041 	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
4042 	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
4043 					    &vi->node_dead);
4044 }
4045 
4046 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
4047 					 u16 vqn, u32 max_usecs, u32 max_packets)
4048 {
4049 	struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
4050 	struct scatterlist sgs;
4051 
4052 	coal_vq = kzalloc_obj(*coal_vq);
4053 	if (!coal_vq)
4054 		return -ENOMEM;
4055 
4056 	coal_vq->vqn = cpu_to_le16(vqn);
4057 	coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
4058 	coal_vq->coal.max_packets = cpu_to_le32(max_packets);
4059 	sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
4060 
4061 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
4062 				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
4063 				  &sgs))
4064 		return -EINVAL;
4065 
4066 	return 0;
4067 }
4068 
4069 static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
4070 					    u16 queue, u32 max_usecs,
4071 					    u32 max_packets)
4072 {
4073 	int err;
4074 
4075 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
4076 		return -EOPNOTSUPP;
4077 
4078 	err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
4079 					    max_usecs, max_packets);
4080 	if (err)
4081 		return err;
4082 
4083 	vi->rq[queue].intr_coal.max_usecs = max_usecs;
4084 	vi->rq[queue].intr_coal.max_packets = max_packets;
4085 
4086 	return 0;
4087 }
4088 
4089 static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
4090 					    u16 queue, u32 max_usecs,
4091 					    u32 max_packets)
4092 {
4093 	int err;
4094 
4095 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
4096 		return -EOPNOTSUPP;
4097 
4098 	err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
4099 					    max_usecs, max_packets);
4100 	if (err)
4101 		return err;
4102 
4103 	vi->sq[queue].intr_coal.max_usecs = max_usecs;
4104 	vi->sq[queue].intr_coal.max_packets = max_packets;
4105 
4106 	return 0;
4107 }
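
/* Note (mapping as used elsewhere in this driver): rxq2vq()/txq2vq() translate a
 * queue pair index q into the device-side virtqueue numbers, rx vq = 2 * q and
 * tx vq = 2 * q + 1, so the coalescing command above is addressed to a virtqueue
 * rather than to a queue pair.
 */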
4108 
4109 static void virtnet_get_ringparam(struct net_device *dev,
4110 				  struct ethtool_ringparam *ring,
4111 				  struct kernel_ethtool_ringparam *kernel_ring,
4112 				  struct netlink_ext_ack *extack)
4113 {
4114 	struct virtnet_info *vi = netdev_priv(dev);
4115 
4116 	ring->rx_max_pending = vi->rq[0].vq->num_max;
4117 	ring->tx_max_pending = vi->sq[0].vq->num_max;
4118 	ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
4119 	ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
4120 }
4121 
4122 static int virtnet_set_ringparam(struct net_device *dev,
4123 				 struct ethtool_ringparam *ring,
4124 				 struct kernel_ethtool_ringparam *kernel_ring,
4125 				 struct netlink_ext_ack *extack)
4126 {
4127 	struct virtnet_info *vi = netdev_priv(dev);
4128 	u32 rx_pending, tx_pending;
4129 	struct receive_queue *rq;
4130 	struct send_queue *sq;
4131 	int i, err;
4132 
4133 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
4134 		return -EINVAL;
4135 
4136 	rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
4137 	tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
4138 
4139 	if (ring->rx_pending == rx_pending &&
4140 	    ring->tx_pending == tx_pending)
4141 		return 0;
4142 
4143 	if (ring->rx_pending > vi->rq[0].vq->num_max)
4144 		return -EINVAL;
4145 
4146 	if (ring->tx_pending > vi->sq[0].vq->num_max)
4147 		return -EINVAL;
4148 
4149 	for (i = 0; i < vi->max_queue_pairs; i++) {
4150 		rq = vi->rq + i;
4151 		sq = vi->sq + i;
4152 
4153 		if (ring->tx_pending != tx_pending) {
4154 			err = virtnet_tx_resize(vi, sq, ring->tx_pending);
4155 			if (err)
4156 				return err;
4157 
4158 			/* Upon disabling and re-enabling a transmit virtqueue, the device must
4159 			 * set the coalescing parameters of the virtqueue to those configured
4160 			 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
4161 			 * did not set any TX coalescing parameters, to 0.
4162 			 */
4163 			err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
4164 							       vi->intr_coal_tx.max_usecs,
4165 							       vi->intr_coal_tx.max_packets);
4166 
4167 			/* Don't break the tx resize action if the vq coalescing is not
4168 			 * supported. The same is true for rx resize below.
4169 			 */
4170 			if (err && err != -EOPNOTSUPP)
4171 				return err;
4172 		}
4173 
4174 		if (ring->rx_pending != rx_pending) {
4175 			err = virtnet_rx_resize(vi, rq, ring->rx_pending);
4176 			if (err)
4177 				return err;
4178 
4179 			/* The reason is the same as for the transmit virtqueue reset above */
4180 			mutex_lock(&vi->rq[i].dim_lock);
4181 			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
4182 							       vi->intr_coal_rx.max_usecs,
4183 							       vi->intr_coal_rx.max_packets);
4184 			mutex_unlock(&vi->rq[i].dim_lock);
4185 			if (err && err != -EOPNOTSUPP)
4186 				return err;
4187 		}
4188 	}
4189 
4190 	return 0;
4191 }
4192 
4193 static bool virtnet_commit_rss_command(struct virtnet_info *vi)
4194 {
4195 	struct net_device *dev = vi->dev;
4196 	struct scatterlist sgs[2];
4197 
4198 	/* prepare sgs */
4199 	sg_init_table(sgs, 2);
4200 	sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi));
4201 	sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi));
4202 
4203 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
4204 				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
4205 				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
4206 		goto err;
4207 
4208 	return true;
4209 
4210 err:
4211 	dev_warn(&dev->dev, "Failed to commit the RSS configuration\n");
4212 	return false;
4213 
4214 }
4215 
4216 static void virtnet_init_default_rss(struct virtnet_info *vi)
4217 {
4218 	vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported);
4219 	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
4220 	vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size
4221 						? cpu_to_le16(vi->rss_indir_table_size - 1) : 0;
4222 	vi->rss_hdr->unclassified_queue = 0;
4223 
4224 	virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
4225 
4226 	vi->rss_trailer.hash_key_length = vi->rss_key_size;
4227 
4228 	netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size);
4229 }
4230 
4231 static int virtnet_get_hashflow(struct net_device *dev,
4232 				struct ethtool_rxfh_fields *info)
4233 {
4234 	struct virtnet_info *vi = netdev_priv(dev);
4235 
4236 	info->data = 0;
4237 	switch (info->flow_type) {
4238 	case TCP_V4_FLOW:
4239 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
4240 			info->data = RXH_IP_SRC | RXH_IP_DST |
4241 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4242 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
4243 			info->data = RXH_IP_SRC | RXH_IP_DST;
4244 		}
4245 		break;
4246 	case TCP_V6_FLOW:
4247 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
4248 			info->data = RXH_IP_SRC | RXH_IP_DST |
4249 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4250 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
4251 			info->data = RXH_IP_SRC | RXH_IP_DST;
4252 		}
4253 		break;
4254 	case UDP_V4_FLOW:
4255 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
4256 			info->data = RXH_IP_SRC | RXH_IP_DST |
4257 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4258 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
4259 			info->data = RXH_IP_SRC | RXH_IP_DST;
4260 		}
4261 		break;
4262 	case UDP_V6_FLOW:
4263 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
4264 			info->data = RXH_IP_SRC | RXH_IP_DST |
4265 						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4266 		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
4267 			info->data = RXH_IP_SRC | RXH_IP_DST;
4268 		}
4269 		break;
4270 	case IPV4_FLOW:
4271 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
4272 			info->data = RXH_IP_SRC | RXH_IP_DST;
4273 
4274 		break;
4275 	case IPV6_FLOW:
4276 		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
4277 			info->data = RXH_IP_SRC | RXH_IP_DST;
4278 
4279 		break;
4280 	default:
4281 		info->data = 0;
4282 		break;
4283 	}
4284 
4285 	return 0;
4286 }
4287 
4288 static int virtnet_set_hashflow(struct net_device *dev,
4289 				const struct ethtool_rxfh_fields *info,
4290 				struct netlink_ext_ack *extack)
4291 {
4292 	struct virtnet_info *vi = netdev_priv(dev);
4293 	u32 new_hashtypes = vi->rss_hash_types_saved;
4294 	bool is_disable = info->data & RXH_DISCARD;
4295 	bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
4296 
4297 	/* supports only 'sd', 'sdfn' and 'r' */
4298 	if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
4299 		return -EINVAL;
4300 
4301 	switch (info->flow_type) {
4302 	case TCP_V4_FLOW:
4303 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
4304 		if (!is_disable)
4305 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
4306 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
4307 		break;
4308 	case UDP_V4_FLOW:
4309 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
4310 		if (!is_disable)
4311 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
4312 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
4313 		break;
4314 	case IPV4_FLOW:
4315 		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
4316 		if (!is_disable)
4317 			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
4318 		break;
4319 	case TCP_V6_FLOW:
4320 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
4321 		if (!is_disable)
4322 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
4323 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
4324 		break;
4325 	case UDP_V6_FLOW:
4326 		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
4327 		if (!is_disable)
4328 			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
4329 				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
4330 		break;
4331 	case IPV6_FLOW:
4332 		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
4333 		if (!is_disable)
4334 			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
4335 		break;
4336 	default:
4337 		/* unsupported flow */
4338 		return -EINVAL;
4339 	}
4340 
4341 	/* if unsupported hashtype was set */
4342 	if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
4343 		return -EINVAL;
4344 
4345 	if (new_hashtypes != vi->rss_hash_types_saved) {
4346 		vi->rss_hash_types_saved = new_hashtypes;
4347 		vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
4348 		if (vi->dev->features & NETIF_F_RXHASH)
4349 			if (!virtnet_commit_rss_command(vi))
4350 				return -EINVAL;
4351 	}
4352 
4353 	return 0;
4354 }
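
/* Example usage (the interface name is illustrative), matching the 'sd', 'sdfn'
 * and 'r' forms accepted above:
 *
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn    # IP src/dst + TCP ports
 *   ethtool -N eth0 rx-flow-hash udp4 sd      # IP src/dst only
 *   ethtool -N eth0 rx-flow-hash udp4 r       # RXH_DISCARD: clear hashing for this flow
 *
 * 'sd' maps to RXH_IP_SRC | RXH_IP_DST, and 'sdfn' additionally sets
 * RXH_L4_B_0_1 | RXH_L4_B_2_3.
 */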
4355 
4356 static void virtnet_get_drvinfo(struct net_device *dev,
4357 				struct ethtool_drvinfo *info)
4358 {
4359 	struct virtnet_info *vi = netdev_priv(dev);
4360 	struct virtio_device *vdev = vi->vdev;
4361 
4362 	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
4363 	strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
4364 	strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
4365 
4366 }
4367 
4368 /* TODO: Eliminate OOO packets during switching */
4369 static int virtnet_set_channels(struct net_device *dev,
4370 				struct ethtool_channels *channels)
4371 {
4372 	struct virtnet_info *vi = netdev_priv(dev);
4373 	u16 queue_pairs = channels->combined_count;
4374 	int err;
4375 
4376 	/* We don't support separate rx/tx channels.
4377 	 * We don't allow setting 'other' channels.
4378 	 */
4379 	if (channels->rx_count || channels->tx_count || channels->other_count)
4380 		return -EINVAL;
4381 
4382 	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
4383 		return -EINVAL;
4384 
4385 	/* For now we don't support modifying channels while XDP is loaded.
4386 	 * Also, when XDP is loaded all RX queues have XDP programs, so we only
4387 	 * need to check a single RX queue.
4388 	 */
4389 	if (vi->rq[0].xdp_prog)
4390 		return -EINVAL;
4391 
4392 	cpus_read_lock();
4393 	err = virtnet_set_queues(vi, queue_pairs);
4394 	if (err) {
4395 		cpus_read_unlock();
4396 		goto err;
4397 	}
4398 	virtnet_set_affinity(vi);
4399 	cpus_read_unlock();
4400 
4401 	netif_set_real_num_tx_queues(dev, queue_pairs);
4402 	netif_set_real_num_rx_queues(dev, queue_pairs);
4403  err:
4404 	return err;
4405 }
4406 
4407 static void virtnet_stats_sprintf(u8 **p, const char *fmt, const char *noq_fmt,
4408 				  int num, int qid, const struct virtnet_stat_desc *desc)
4409 {
4410 	int i;
4411 
4412 	if (qid < 0) {
4413 		for (i = 0; i < num; ++i)
4414 			ethtool_sprintf(p, noq_fmt, desc[i].desc);
4415 	} else {
4416 		for (i = 0; i < num; ++i)
4417 			ethtool_sprintf(p, fmt, qid, desc[i].desc);
4418 	}
4419 }
4420 
4421 /* qid == -1: for rx/tx queue total field */
4422 static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
4423 {
4424 	const struct virtnet_stat_desc *desc;
4425 	const char *fmt, *noq_fmt;
4426 	u8 *p = *data;
4427 	u32 num;
4428 
4429 	if (type == VIRTNET_Q_TYPE_CQ && qid >= 0) {
4430 		noq_fmt = "cq_hw_%s";
4431 
4432 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
4433 			desc = &virtnet_stats_cvq_desc[0];
4434 			num = ARRAY_SIZE(virtnet_stats_cvq_desc);
4435 
4436 			virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc);
4437 		}
4438 	}
4439 
4440 	if (type == VIRTNET_Q_TYPE_RX) {
4441 		fmt = "rx%u_%s";
4442 		noq_fmt = "rx_%s";
4443 
4444 		desc = &virtnet_rq_stats_desc[0];
4445 		num = ARRAY_SIZE(virtnet_rq_stats_desc);
4446 
4447 		virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4448 
4449 		fmt = "rx%u_hw_%s";
4450 		noq_fmt = "rx_hw_%s";
4451 
4452 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4453 			desc = &virtnet_stats_rx_basic_desc[0];
4454 			num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4455 
4456 			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4457 		}
4458 
4459 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4460 			desc = &virtnet_stats_rx_csum_desc[0];
4461 			num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4462 
4463 			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4464 		}
4465 
4466 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4467 			desc = &virtnet_stats_rx_speed_desc[0];
4468 			num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4469 
4470 			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4471 		}
4472 	}
4473 
4474 	if (type == VIRTNET_Q_TYPE_TX) {
4475 		fmt = "tx%u_%s";
4476 		noq_fmt = "tx_%s";
4477 
4478 		desc = &virtnet_sq_stats_desc[0];
4479 		num = ARRAY_SIZE(virtnet_sq_stats_desc);
4480 
4481 		virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4482 
4483 		fmt = "tx%u_hw_%s";
4484 		noq_fmt = "tx_hw_%s";
4485 
4486 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4487 			desc = &virtnet_stats_tx_basic_desc[0];
4488 			num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4489 
4490 			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4491 		}
4492 
4493 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4494 			desc = &virtnet_stats_tx_gso_desc[0];
4495 			num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4496 
4497 			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4498 		}
4499 
4500 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4501 			desc = &virtnet_stats_tx_speed_desc[0];
4502 			num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4503 
4504 			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4505 		}
4506 	}
4507 
4508 	*data = p;
4509 }
4510 
4511 struct virtnet_stats_ctx {
4512 	/* Write the stats to qstats (true) or to ethtool -S (false) */
4513 	bool to_qstat;
4514 
4515 	/* Used to calculate the offset inside the output buffer. */
4516 	u32 desc_num[3];
4517 
4518 	/* The actual supported stat types. */
4519 	u64 bitmap[3];
4520 
4521 	/* Used to calculate the reply buffer size. */
4522 	u32 size[3];
4523 
4524 	/* Record the output buffer. */
4525 	u64 *data;
4526 };
4527 
4528 static void virtnet_stats_ctx_init(struct virtnet_info *vi,
4529 				   struct virtnet_stats_ctx *ctx,
4530 				   u64 *data, bool to_qstat)
4531 {
4532 	u32 queue_type;
4533 
4534 	ctx->data = data;
4535 	ctx->to_qstat = to_qstat;
4536 
4537 	if (to_qstat) {
4538 		ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
4539 		ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
4540 
4541 		queue_type = VIRTNET_Q_TYPE_RX;
4542 
4543 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4544 			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
4545 			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
4546 			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_basic);
4547 		}
4548 
4549 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4550 			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
4551 			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
4552 			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_csum);
4553 		}
4554 
4555 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
4556 			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_GSO;
4557 			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
4558 			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_gso);
4559 		}
4560 
4561 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4562 			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
4563 			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
4564 			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_speed);
4565 		}
4566 
4567 		queue_type = VIRTNET_Q_TYPE_TX;
4568 
4569 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4570 			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
4571 			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
4572 			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_basic);
4573 		}
4574 
4575 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
4576 			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_CSUM;
4577 			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
4578 			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_csum);
4579 		}
4580 
4581 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4582 			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_GSO;
4583 			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
4584 			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_gso);
4585 		}
4586 
4587 		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4588 			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
4589 			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
4590 			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_speed);
4591 		}
4592 
4593 		return;
4594 	}
4595 
4596 	ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc);
4597 	ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc);
4598 
4599 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
4600 		queue_type = VIRTNET_Q_TYPE_CQ;
4601 
4602 		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_CVQ;
4603 		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc);
4604 		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_cvq);
4605 	}
4606 
4607 	queue_type = VIRTNET_Q_TYPE_RX;
4608 
4609 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4610 		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
4611 		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4612 		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_basic);
4613 	}
4614 
4615 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4616 		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
4617 		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4618 		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_csum);
4619 	}
4620 
4621 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4622 		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
4623 		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4624 		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_speed);
4625 	}
4626 
4627 	queue_type = VIRTNET_Q_TYPE_TX;
4628 
4629 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4630 		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
4631 		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4632 		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_basic);
4633 	}
4634 
4635 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4636 		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_GSO;
4637 		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4638 		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_gso);
4639 	}
4640 
4641 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4642 		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
4643 		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4644 		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_speed);
4645 	}
4646 }
4647 
4648 /* stats_sum_queue - Calculate the sum of the same fields across all sqs or rqs.
4649  * @sum: where to store the summed values
4650  * @num: number of fields per queue
4651  * @q_value: the fields of the first queue
4652  * @q_num: the number of queues
4653  */
4654 static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num)
4655 {
4656 	u32 step = num;
4657 	int i, j;
4658 	u64 *p;
4659 
4660 	for (i = 0; i < num; ++i) {
4661 		p = sum + i;
4662 		*p = 0;
4663 
4664 		for (j = 0; j < q_num; ++j)
4665 			*p += *(q_value + i + j * step);
4666 	}
4667 }
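
/* Example: with num = 2 fields and q_num = 3 queues, q_value is laid out as
 * [q0.f0, q0.f1, q1.f0, q1.f1, q2.f0, q2.f1], giving
 * sum[0] = q0.f0 + q1.f0 + q2.f0 and sum[1] = q0.f1 + q1.f1 + q2.f1.
 */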
4668 
4669 static void virtnet_fill_total_fields(struct virtnet_info *vi,
4670 				      struct virtnet_stats_ctx *ctx)
4671 {
4672 	u64 *data, *first_rx_q, *first_tx_q;
4673 	u32 num_cq, num_rx, num_tx;
4674 
4675 	num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
4676 	num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
4677 	num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
4678 
4679 	first_rx_q = ctx->data + num_rx + num_tx + num_cq;
4680 	first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx;
4681 
4682 	data = ctx->data;
4683 
4684 	stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs);
4685 
4686 	data = ctx->data + num_rx;
4687 
4688 	stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs);
4689 }
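
/* The ethtool -S buffer filled here is laid out (in order) as:
 *   [rx totals][tx totals][cq hw stats][per-queue rx stats ...][per-queue tx stats ...]
 * first_rx_q/first_tx_q above point at the per-queue regions, and each total is the
 * corresponding per-queue field summed over curr_queue_pairs.
 */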
4690 
4691 static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid,
4692 				     struct virtnet_stats_ctx *ctx,
4693 				     const u8 *base, bool drv_stats, u8 reply_type)
4694 {
4695 	const struct virtnet_stat_desc *desc;
4696 	const u64_stats_t *v_stat;
4697 	u64 offset, bitmap;
4698 	const __le64 *v;
4699 	u32 queue_type;
4700 	int i, num;
4701 
4702 	queue_type = vq_type(vi, qid);
4703 	bitmap = ctx->bitmap[queue_type];
4704 
4705 	if (drv_stats) {
4706 		if (queue_type == VIRTNET_Q_TYPE_RX) {
4707 			desc = &virtnet_rq_stats_desc_qstat[0];
4708 			num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
4709 		} else {
4710 			desc = &virtnet_sq_stats_desc_qstat[0];
4711 			num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
4712 		}
4713 
4714 		for (i = 0; i < num; ++i) {
4715 			offset = desc[i].qstat_offset / sizeof(*ctx->data);
4716 			v_stat = (const u64_stats_t *)(base + desc[i].offset);
4717 			ctx->data[offset] = u64_stats_read(v_stat);
4718 		}
4719 		return;
4720 	}
4721 
4722 	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4723 		desc = &virtnet_stats_rx_basic_desc_qstat[0];
4724 		num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
4725 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
4726 			goto found;
4727 	}
4728 
4729 	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4730 		desc = &virtnet_stats_rx_csum_desc_qstat[0];
4731 		num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
4732 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
4733 			goto found;
4734 	}
4735 
4736 	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
4737 		desc = &virtnet_stats_rx_gso_desc_qstat[0];
4738 		num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
4739 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO)
4740 			goto found;
4741 	}
4742 
4743 	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4744 		desc = &virtnet_stats_rx_speed_desc_qstat[0];
4745 		num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
4746 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
4747 			goto found;
4748 	}
4749 
4750 	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4751 		desc = &virtnet_stats_tx_basic_desc_qstat[0];
4752 		num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
4753 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
4754 			goto found;
4755 	}
4756 
4757 	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
4758 		desc = &virtnet_stats_tx_csum_desc_qstat[0];
4759 		num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
4760 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM)
4761 			goto found;
4762 	}
4763 
4764 	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4765 		desc = &virtnet_stats_tx_gso_desc_qstat[0];
4766 		num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
4767 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
4768 			goto found;
4769 	}
4770 
4771 	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4772 		desc = &virtnet_stats_tx_speed_desc_qstat[0];
4773 		num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
4774 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
4775 			goto found;
4776 	}
4777 
4778 	return;
4779 
4780 found:
4781 	for (i = 0; i < num; ++i) {
4782 		offset = desc[i].qstat_offset / sizeof(*ctx->data);
4783 		v = (const __le64 *)(base + desc[i].offset);
4784 		ctx->data[offset] = le64_to_cpu(*v);
4785 	}
4786 }
4787 
4788 /* virtnet_fill_stats - copy the stats to qstats or ethtool -S
4789  * The stats source is the device or the driver.
4790  *
4791  * @vi: virtio net info
4792  * @qid: the vq id
4793  * @ctx: stats ctx (initiated by virtnet_stats_ctx_init())
4794  * @ctx: stats ctx (initialized by virtnet_stats_ctx_init())
4795  * @base: pointer to the device reply or the driver stats structure.
4796  * @drv_stats: indicates whether @base is driver stats (true) or a device reply (false)
4797  * @reply_type: the type of the device reply (if drv_stats is true, this must be zero)
4798 static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
4799 			       struct virtnet_stats_ctx *ctx,
4800 			       const u8 *base, bool drv_stats, u8 reply_type)
4801 {
4802 	u32 queue_type, num_rx, num_tx, num_cq;
4803 	const struct virtnet_stat_desc *desc;
4804 	const u64_stats_t *v_stat;
4805 	u64 offset, bitmap;
4806 	const __le64 *v;
4807 	int i, num;
4808 
4809 	if (ctx->to_qstat)
4810 		return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type);
4811 
4812 	num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
4813 	num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
4814 	num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
4815 
4816 	queue_type = vq_type(vi, qid);
4817 	bitmap = ctx->bitmap[queue_type];
4818 
4819 	/* skip the total fields of pairs */
4820 	offset = num_rx + num_tx;
4821 
4822 	if (queue_type == VIRTNET_Q_TYPE_TX) {
4823 		offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
4824 
4825 		num = ARRAY_SIZE(virtnet_sq_stats_desc);
4826 		if (drv_stats) {
4827 			desc = &virtnet_sq_stats_desc[0];
4828 			goto drv_stats;
4829 		}
4830 
4831 		offset += num;
4832 
4833 	} else if (queue_type == VIRTNET_Q_TYPE_RX) {
4834 		offset += num_cq + num_rx * (qid / 2);
4835 
4836 		num = ARRAY_SIZE(virtnet_rq_stats_desc);
4837 		if (drv_stats) {
4838 			desc = &virtnet_rq_stats_desc[0];
4839 			goto drv_stats;
4840 		}
4841 
4842 		offset += num;
4843 	}
4844 
4845 	if (bitmap & VIRTIO_NET_STATS_TYPE_CVQ) {
4846 		desc = &virtnet_stats_cvq_desc[0];
4847 		num = ARRAY_SIZE(virtnet_stats_cvq_desc);
4848 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_CVQ)
4849 			goto found;
4850 
4851 		offset += num;
4852 	}
4853 
4854 	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4855 		desc = &virtnet_stats_rx_basic_desc[0];
4856 		num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4857 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
4858 			goto found;
4859 
4860 		offset += num;
4861 	}
4862 
4863 	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4864 		desc = &virtnet_stats_rx_csum_desc[0];
4865 		num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4866 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
4867 			goto found;
4868 
4869 		offset += num;
4870 	}
4871 
4872 	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4873 		desc = &virtnet_stats_rx_speed_desc[0];
4874 		num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4875 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
4876 			goto found;
4877 
4878 		offset += num;
4879 	}
4880 
4881 	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4882 		desc = &virtnet_stats_tx_basic_desc[0];
4883 		num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4884 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
4885 			goto found;
4886 
4887 		offset += num;
4888 	}
4889 
4890 	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4891 		desc = &virtnet_stats_tx_gso_desc[0];
4892 		num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4893 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
4894 			goto found;
4895 
4896 		offset += num;
4897 	}
4898 
4899 	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4900 		desc = &virtnet_stats_tx_speed_desc[0];
4901 		num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4902 		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
4903 			goto found;
4904 
4905 		offset += num;
4906 	}
4907 
4908 	return;
4909 
4910 found:
4911 	for (i = 0; i < num; ++i) {
4912 		v = (const __le64 *)(base + desc[i].offset);
4913 		ctx->data[offset + i] = le64_to_cpu(*v);
4914 	}
4915 
4916 	return;
4917 
4918 drv_stats:
4919 	for (i = 0; i < num; ++i) {
4920 		v_stat = (const u64_stats_t *)(base + desc[i].offset);
4921 		ctx->data[offset + i] = u64_stats_read(v_stat);
4922 	}
4923 }
4924 
4925 static int __virtnet_get_hw_stats(struct virtnet_info *vi,
4926 				  struct virtnet_stats_ctx *ctx,
4927 				  struct virtio_net_ctrl_queue_stats *req,
4928 				  int req_size, void *reply, int res_size)
4929 {
4930 	struct virtio_net_stats_reply_hdr *hdr;
4931 	struct scatterlist sgs_in, sgs_out;
4932 	void *p;
4933 	u32 qid;
4934 	int ok;
4935 
4936 	sg_init_one(&sgs_out, req, req_size);
4937 	sg_init_one(&sgs_in, reply, res_size);
4938 
4939 	ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
4940 					VIRTIO_NET_CTRL_STATS_GET,
4941 					&sgs_out, &sgs_in);
4942 
4943 	if (!ok)
4944 		return ok;
4945 
4946 	for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
4947 		hdr = p;
4948 		qid = le16_to_cpu(hdr->vq_index);
4949 		virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
4950 	}
4951 
4952 	return 0;
4953 }
4954 
4955 static void virtnet_make_stat_req(struct virtnet_info *vi,
4956 				  struct virtnet_stats_ctx *ctx,
4957 				  struct virtio_net_ctrl_queue_stats *req,
4958 				  int qid, int *idx)
4959 {
4960 	int qtype = vq_type(vi, qid);
4961 	u64 bitmap = ctx->bitmap[qtype];
4962 
4963 	if (!bitmap)
4964 		return;
4965 
4966 	req->stats[*idx].vq_index = cpu_to_le16(qid);
4967 	req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap);
4968 	*idx += 1;
4969 }
4970 
4971 /* qid: -1: get the stats of all vqs.
4972  *    >= 0: get the stats for the specified vq. This must not be the cvq.
4973  */
4974 static int virtnet_get_hw_stats(struct virtnet_info *vi,
4975 				struct virtnet_stats_ctx *ctx, int qid)
4976 {
4977 	int qnum, i, j, res_size, qtype, last_vq, first_vq;
4978 	struct virtio_net_ctrl_queue_stats *req;
4979 	bool enable_cvq;
4980 	void *reply;
4981 	int ok;
4982 
4983 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
4984 		return 0;
4985 
4986 	if (qid == -1) {
4987 		last_vq = vi->curr_queue_pairs * 2 - 1;
4988 		first_vq = 0;
4989 		enable_cvq = true;
4990 	} else {
4991 		last_vq = qid;
4992 		first_vq = qid;
4993 		enable_cvq = false;
4994 	}
4995 
4996 	qnum = 0;
4997 	res_size = 0;
4998 	for (i = first_vq; i <= last_vq ; ++i) {
4999 		qtype = vq_type(vi, i);
5000 		if (ctx->bitmap[qtype]) {
5001 			++qnum;
5002 			res_size += ctx->size[qtype];
5003 		}
5004 	}
5005 
5006 	if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
5007 		res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
5008 		qnum += 1;
5009 	}
5010 
5011 	req = kzalloc_objs(*req, qnum);
5012 	if (!req)
5013 		return -ENOMEM;
5014 
5015 	reply = kmalloc(res_size, GFP_KERNEL);
5016 	if (!reply) {
5017 		kfree(req);
5018 		return -ENOMEM;
5019 	}
5020 
5021 	j = 0;
5022 	for (i = first_vq; i <= last_vq ; ++i)
5023 		virtnet_make_stat_req(vi, ctx, req, i, &j);
5024 
5025 	if (enable_cvq)
5026 		virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
5027 
5028 	ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
5029 
5030 	kfree(req);
5031 	kfree(reply);
5032 
5033 	return ok;
5034 }
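
/* Control-virtqueue exchange performed above, roughly:
 *   driver -> device: an array of virtio_net_ctrl_queue_stats entries, one per
 *                     requested vq, each with vq_index and a types_bitmap;
 *   device -> driver: a sequence of replies, each headed by a
 *                     virtio_net_stats_reply_hdr carrying type, vq_index and size,
 *                     which __virtnet_get_hw_stats() uses to walk the buffer.
 */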
5035 
5036 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
5037 {
5038 	struct virtnet_info *vi = netdev_priv(dev);
5039 	unsigned int i;
5040 	u8 *p = data;
5041 
5042 	switch (stringset) {
5043 	case ETH_SS_STATS:
5044 		/* Generate the total field names. */
5045 		virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p);
5046 		virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p);
5047 
5048 		virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
5049 
5050 		for (i = 0; i < vi->curr_queue_pairs; ++i)
5051 			virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
5052 
5053 		for (i = 0; i < vi->curr_queue_pairs; ++i)
5054 			virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
5055 		break;
5056 	}
5057 }
5058 
5059 static int virtnet_get_sset_count(struct net_device *dev, int sset)
5060 {
5061 	struct virtnet_info *vi = netdev_priv(dev);
5062 	struct virtnet_stats_ctx ctx = {0};
5063 	u32 pair_count;
5064 
5065 	switch (sset) {
5066 	case ETH_SS_STATS:
5067 		virtnet_stats_ctx_init(vi, &ctx, NULL, false);
5068 
5069 		pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
5070 
5071 		return pair_count + ctx.desc_num[VIRTNET_Q_TYPE_CQ] +
5072 			vi->curr_queue_pairs * pair_count;
5073 	default:
5074 		return -EOPNOTSUPP;
5075 	}
5076 }
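
/* Example count (field numbers are hypothetical): if RX and TX each expose 10
 * driver fields and no device stats are negotiated, a device with
 * curr_queue_pairs = 2 reports (10 + 10) + 0 + 2 * (10 + 10) = 60 strings/values.
 */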
5077 
5078 static void virtnet_get_ethtool_stats(struct net_device *dev,
5079 				      struct ethtool_stats *stats, u64 *data)
5080 {
5081 	struct virtnet_info *vi = netdev_priv(dev);
5082 	struct virtnet_stats_ctx ctx = {0};
5083 	unsigned int start, i;
5084 	const u8 *stats_base;
5085 
5086 	virtnet_stats_ctx_init(vi, &ctx, data, false);
5087 	if (virtnet_get_hw_stats(vi, &ctx, -1))
5088 		dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
5089 
5090 	for (i = 0; i < vi->curr_queue_pairs; i++) {
5091 		struct receive_queue *rq = &vi->rq[i];
5092 		struct send_queue *sq = &vi->sq[i];
5093 
5094 		stats_base = (const u8 *)&rq->stats;
5095 		do {
5096 			start = u64_stats_fetch_begin(&rq->stats.syncp);
5097 			virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0);
5098 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
5099 
5100 		stats_base = (const u8 *)&sq->stats;
5101 		do {
5102 			start = u64_stats_fetch_begin(&sq->stats.syncp);
5103 			virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
5104 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
5105 	}
5106 
5107 	virtnet_fill_total_fields(vi, &ctx);
5108 }
5109 
5110 static void virtnet_get_channels(struct net_device *dev,
5111 				 struct ethtool_channels *channels)
5112 {
5113 	struct virtnet_info *vi = netdev_priv(dev);
5114 
5115 	channels->combined_count = vi->curr_queue_pairs;
5116 	channels->max_combined = vi->max_queue_pairs;
5117 	channels->max_other = 0;
5118 	channels->rx_count = 0;
5119 	channels->tx_count = 0;
5120 	channels->other_count = 0;
5121 }
5122 
5123 static int virtnet_set_link_ksettings(struct net_device *dev,
5124 				      const struct ethtool_link_ksettings *cmd)
5125 {
5126 	struct virtnet_info *vi = netdev_priv(dev);
5127 
5128 	return ethtool_virtdev_set_link_ksettings(dev, cmd,
5129 						  &vi->speed, &vi->duplex);
5130 }
5131 
5132 static int virtnet_get_link_ksettings(struct net_device *dev,
5133 				      struct ethtool_link_ksettings *cmd)
5134 {
5135 	struct virtnet_info *vi = netdev_priv(dev);
5136 
5137 	cmd->base.speed = vi->speed;
5138 	cmd->base.duplex = vi->duplex;
5139 	cmd->base.port = PORT_OTHER;
5140 
5141 	return 0;
5142 }
5143 
5144 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
5145 					  struct ethtool_coalesce *ec)
5146 {
5147 	struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
5148 	struct scatterlist sgs_tx;
5149 	int i;
5150 
5151 	coal_tx = kzalloc_obj(*coal_tx);
5152 	if (!coal_tx)
5153 		return -ENOMEM;
5154 
5155 	coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
5156 	coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
5157 	sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
5158 
5159 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
5160 				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
5161 				  &sgs_tx))
5162 		return -EINVAL;
5163 
5164 	vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
5165 	vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
5166 	for (i = 0; i < vi->max_queue_pairs; i++) {
5167 		vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
5168 		vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
5169 	}
5170 
5171 	return 0;
5172 }
5173 
5174 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
5175 					  struct ethtool_coalesce *ec)
5176 {
5177 	struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
5178 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
5179 	struct scatterlist sgs_rx;
5180 	int i;
5181 
5182 	if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
5183 		return -EOPNOTSUPP;
5184 
5185 	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
5186 			       ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
5187 		return -EINVAL;
5188 
5189 	if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
5190 		vi->rx_dim_enabled = true;
5191 		for (i = 0; i < vi->max_queue_pairs; i++) {
5192 			mutex_lock(&vi->rq[i].dim_lock);
5193 			vi->rq[i].dim_enabled = true;
5194 			mutex_unlock(&vi->rq[i].dim_lock);
5195 		}
5196 		return 0;
5197 	}
5198 
5199 	coal_rx = kzalloc_obj(*coal_rx);
5200 	if (!coal_rx)
5201 		return -ENOMEM;
5202 
5203 	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
5204 		vi->rx_dim_enabled = false;
5205 		for (i = 0; i < vi->max_queue_pairs; i++) {
5206 			mutex_lock(&vi->rq[i].dim_lock);
5207 			vi->rq[i].dim_enabled = false;
5208 			mutex_unlock(&vi->rq[i].dim_lock);
5209 		}
5210 	}
5211 
5212 	/* Since the per-queue coalescing params can be set individually,
5213 	 * we need to apply the new global params even if they
5214 	 * are not updated.
5215 	 */
5216 	coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
5217 	coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
5218 	sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
5219 
5220 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
5221 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
5222 				  &sgs_rx))
5223 		return -EINVAL;
5224 
5225 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
5226 	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
5227 	for (i = 0; i < vi->max_queue_pairs; i++) {
5228 		mutex_lock(&vi->rq[i].dim_lock);
5229 		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
5230 		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
5231 		mutex_unlock(&vi->rq[i].dim_lock);
5232 	}
5233 
5234 	return 0;
5235 }
5236 
5237 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
5238 				       struct ethtool_coalesce *ec)
5239 {
5240 	int err;
5241 
5242 	err = virtnet_send_tx_notf_coal_cmds(vi, ec);
5243 	if (err)
5244 		return err;
5245 
5246 	err = virtnet_send_rx_notf_coal_cmds(vi, ec);
5247 	if (err)
5248 		return err;
5249 
5250 	return 0;
5251 }
5252 
5253 static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
5254 					     struct ethtool_coalesce *ec,
5255 					     u16 queue)
5256 {
5257 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
5258 	u32 max_usecs, max_packets;
5259 	bool cur_rx_dim;
5260 	int err;
5261 
5262 	mutex_lock(&vi->rq[queue].dim_lock);
5263 	cur_rx_dim = vi->rq[queue].dim_enabled;
5264 	max_usecs = vi->rq[queue].intr_coal.max_usecs;
5265 	max_packets = vi->rq[queue].intr_coal.max_packets;
5266 
5267 	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
5268 			       ec->rx_max_coalesced_frames != max_packets)) {
5269 		mutex_unlock(&vi->rq[queue].dim_lock);
5270 		return -EINVAL;
5271 	}
5272 
5273 	if (rx_ctrl_dim_on && !cur_rx_dim) {
5274 		vi->rq[queue].dim_enabled = true;
5275 		mutex_unlock(&vi->rq[queue].dim_lock);
5276 		return 0;
5277 	}
5278 
5279 	if (!rx_ctrl_dim_on && cur_rx_dim)
5280 		vi->rq[queue].dim_enabled = false;
5281 
5282 	/* If no params are updated, userspace ethtool will
5283 	 * reject the modification.
5284 	 */
5285 	err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
5286 					       ec->rx_coalesce_usecs,
5287 					       ec->rx_max_coalesced_frames);
5288 	mutex_unlock(&vi->rq[queue].dim_lock);
5289 	return err;
5290 }
5291 
5292 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
5293 					  struct ethtool_coalesce *ec,
5294 					  u16 queue)
5295 {
5296 	int err;
5297 
5298 	err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
5299 	if (err)
5300 		return err;
5301 
5302 	err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
5303 					       ec->tx_coalesce_usecs,
5304 					       ec->tx_max_coalesced_frames);
5305 	if (err)
5306 		return err;
5307 
5308 	return 0;
5309 }
5310 
5311 static void virtnet_rx_dim_work(struct work_struct *work)
5312 {
5313 	struct dim *dim = container_of(work, struct dim, work);
5314 	struct receive_queue *rq = container_of(dim,
5315 			struct receive_queue, dim);
5316 	struct virtnet_info *vi = rq->vq->vdev->priv;
5317 	struct net_device *dev = vi->dev;
5318 	struct dim_cq_moder update_moder;
5319 	int qnum, err;
5320 
5321 	qnum = rq - vi->rq;
5322 
5323 	mutex_lock(&rq->dim_lock);
5324 	if (!rq->dim_enabled)
5325 		goto out;
5326 
5327 	update_moder = net_dim_get_rx_irq_moder(dev, dim);
5328 	if (update_moder.usec != rq->intr_coal.max_usecs ||
5329 	    update_moder.pkts != rq->intr_coal.max_packets) {
5330 		err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
5331 						       update_moder.usec,
5332 						       update_moder.pkts);
5333 		if (err)
5334 			pr_debug("%s: Failed to send dim parameters on rxq%d\n",
5335 				 dev->name, qnum);
5336 	}
5337 out:
5338 	dim->state = DIM_START_MEASURE;
5339 	mutex_unlock(&rq->dim_lock);
5340 }
5341 
5342 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
5343 {
5344 	/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
5345 	 * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
5346 	 */
5347 	if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
5348 		return -EOPNOTSUPP;
5349 
5350 	if (ec->tx_max_coalesced_frames > 1 ||
5351 	    ec->rx_max_coalesced_frames != 1)
5352 		return -EINVAL;
5353 
5354 	return 0;
5355 }
5356 
5357 static int virtnet_should_update_vq_weight(int dev_flags, int weight,
5358 					   int vq_weight, bool *should_update)
5359 {
5360 	if (weight ^ vq_weight) {
5361 		if (dev_flags & IFF_UP)
5362 			return -EBUSY;
5363 		*should_update = true;
5364 	}
5365 
5366 	return 0;
5367 }
5368 
5369 static int virtnet_set_coalesce(struct net_device *dev,
5370 				struct ethtool_coalesce *ec,
5371 				struct kernel_ethtool_coalesce *kernel_coal,
5372 				struct netlink_ext_ack *extack)
5373 {
5374 	struct virtnet_info *vi = netdev_priv(dev);
5375 	int ret, queue_number, napi_weight, i;
5376 	bool update_napi = false;
5377 
5378 	/* Can't change NAPI weight if the link is up */
5379 	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
5380 	for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
5381 		ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
5382 						      vi->sq[queue_number].napi.weight,
5383 						      &update_napi);
5384 		if (ret)
5385 			return ret;
5386 
5387 		if (update_napi) {
5388 			/* All queues in [queue_number, vi->max_queue_pairs) will be
5389 			 * updated for the sake of simplicity, which might not be necessary.
5390 			 */
5391 			break;
5392 		}
5393 	}
5394 
5395 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
5396 		ret = virtnet_send_notf_coal_cmds(vi, ec);
5397 	else
5398 		ret = virtnet_coal_params_supported(ec);
5399 
5400 	if (ret)
5401 		return ret;
5402 
5403 	if (update_napi) {
5404 		/* xsk xmit depends on the tx napi. So if xsk is active,
5405 		 * prevent modifications to tx napi.
5406 		 */
5407 		for (i = queue_number; i < vi->max_queue_pairs; i++) {
5408 			if (vi->sq[i].xsk_pool)
5409 				return -EBUSY;
5410 		}
5411 
5412 		for (; queue_number < vi->max_queue_pairs; queue_number++)
5413 			vi->sq[queue_number].napi.weight = napi_weight;
5414 	}
5415 
5416 	return ret;
5417 }
5418 
5419 static int virtnet_get_coalesce(struct net_device *dev,
5420 				struct ethtool_coalesce *ec,
5421 				struct kernel_ethtool_coalesce *kernel_coal,
5422 				struct netlink_ext_ack *extack)
5423 {
5424 	struct virtnet_info *vi = netdev_priv(dev);
5425 
5426 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
5427 		ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
5428 		ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
5429 		ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
5430 		ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
5431 		ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
5432 	} else {
5433 		ec->rx_max_coalesced_frames = 1;
5434 
5435 		if (vi->sq[0].napi.weight)
5436 			ec->tx_max_coalesced_frames = 1;
5437 	}
5438 
5439 	return 0;
5440 }
5441 
5442 static int virtnet_set_per_queue_coalesce(struct net_device *dev,
5443 					  u32 queue,
5444 					  struct ethtool_coalesce *ec)
5445 {
5446 	struct virtnet_info *vi = netdev_priv(dev);
5447 	int ret, napi_weight;
5448 	bool update_napi = false;
5449 
5450 	if (queue >= vi->max_queue_pairs)
5451 		return -EINVAL;
5452 
5453 	/* Can't change NAPI weight if the link is up */
5454 	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
5455 	ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
5456 					      vi->sq[queue].napi.weight,
5457 					      &update_napi);
5458 	if (ret)
5459 		return ret;
5460 
5461 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
5462 		ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
5463 	else
5464 		ret = virtnet_coal_params_supported(ec);
5465 
5466 	if (ret)
5467 		return ret;
5468 
5469 	if (update_napi)
5470 		vi->sq[queue].napi.weight = napi_weight;
5471 
5472 	return 0;
5473 }
5474 
5475 static int virtnet_get_per_queue_coalesce(struct net_device *dev,
5476 					  u32 queue,
5477 					  struct ethtool_coalesce *ec)
5478 {
5479 	struct virtnet_info *vi = netdev_priv(dev);
5480 
5481 	if (queue >= vi->max_queue_pairs)
5482 		return -EINVAL;
5483 
5484 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
5485 		mutex_lock(&vi->rq[queue].dim_lock);
5486 		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
5487 		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
5488 		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
5489 		ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
5490 		ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
5491 		mutex_unlock(&vi->rq[queue].dim_lock);
5492 	} else {
5493 		ec->rx_max_coalesced_frames = 1;
5494 
5495 		if (vi->sq[queue].napi.weight)
5496 			ec->tx_max_coalesced_frames = 1;
5497 	}
5498 
5499 	return 0;
5500 }
5501 
5502 static void virtnet_init_settings(struct net_device *dev)
5503 {
5504 	struct virtnet_info *vi = netdev_priv(dev);
5505 
5506 	vi->speed = SPEED_UNKNOWN;
5507 	vi->duplex = DUPLEX_UNKNOWN;
5508 }
5509 
5510 static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
5511 {
5512 	return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
5513 }
5514 
5515 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
5516 {
5517 	return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
5518 }
5519 
5520 static int virtnet_get_rxfh(struct net_device *dev,
5521 			    struct ethtool_rxfh_param *rxfh)
5522 {
5523 	struct virtnet_info *vi = netdev_priv(dev);
5524 	int i;
5525 
5526 	if (rxfh->indir) {
5527 		for (i = 0; i < vi->rss_indir_table_size; ++i)
5528 			rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]);
5529 	}
5530 
5531 	if (rxfh->key)
5532 		memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size);
5533 
5534 	rxfh->hfunc = ETH_RSS_HASH_TOP;
5535 
5536 	return 0;
5537 }
5538 
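/* Update the RSS configuration from ethtool. The indirection table can only
 * be changed when VIRTIO_NET_F_RSS was negotiated, the hash key when either
 * _F_RSS or _F_HASH_REPORT was, and only the Toeplitz hash
 * (ETH_RSS_HASH_TOP) is supported. Any accepted change is pushed to the
 * device via virtnet_commit_rss_command().
 */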
5539 static int virtnet_set_rxfh(struct net_device *dev,
5540 			    struct ethtool_rxfh_param *rxfh,
5541 			    struct netlink_ext_ack *extack)
5542 {
5543 	struct virtnet_info *vi = netdev_priv(dev);
5544 	bool update = false;
5545 	int i;
5546 
5547 	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
5548 	    rxfh->hfunc != ETH_RSS_HASH_TOP)
5549 		return -EOPNOTSUPP;
5550 
5551 	if (rxfh->indir) {
5552 		if (!vi->has_rss)
5553 			return -EOPNOTSUPP;
5554 
5555 		for (i = 0; i < vi->rss_indir_table_size; ++i)
5556 			vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]);
5557 		update = true;
5558 	}
5559 
5560 	if (rxfh->key) {
5561 		/* If either _F_HASH_REPORT or _F_RSS is negotiated, the
5562 		 * device provides hash calculation capabilities; that is,
5563 		 * the hash_key is configured.
5564 		 */
5565 		if (!vi->has_rss && !vi->has_rss_hash_report)
5566 			return -EOPNOTSUPP;
5567 
5568 		memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size);
5569 		update = true;
5570 	}
5571 
5572 	if (update)
5573 		virtnet_commit_rss_command(vi);
5574 
5575 	return 0;
5576 }
5577 
5578 static u32 virtnet_get_rx_ring_count(struct net_device *dev)
5579 {
5580 	struct virtnet_info *vi = netdev_priv(dev);
5581 
5582 	return vi->curr_queue_pairs;
5583 }
5584 
5585 static const struct ethtool_ops virtnet_ethtool_ops = {
5586 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
5587 		ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
5588 	.get_drvinfo = virtnet_get_drvinfo,
5589 	.get_link = ethtool_op_get_link,
5590 	.get_ringparam = virtnet_get_ringparam,
5591 	.set_ringparam = virtnet_set_ringparam,
5592 	.get_strings = virtnet_get_strings,
5593 	.get_sset_count = virtnet_get_sset_count,
5594 	.get_ethtool_stats = virtnet_get_ethtool_stats,
5595 	.set_channels = virtnet_set_channels,
5596 	.get_channels = virtnet_get_channels,
5597 	.get_ts_info = ethtool_op_get_ts_info,
5598 	.get_link_ksettings = virtnet_get_link_ksettings,
5599 	.set_link_ksettings = virtnet_set_link_ksettings,
5600 	.set_coalesce = virtnet_set_coalesce,
5601 	.get_coalesce = virtnet_get_coalesce,
5602 	.set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
5603 	.get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
5604 	.get_rxfh_key_size = virtnet_get_rxfh_key_size,
5605 	.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
5606 	.get_rxfh = virtnet_get_rxfh,
5607 	.set_rxfh = virtnet_set_rxfh,
5608 	.get_rxfh_fields = virtnet_get_hashflow,
5609 	.set_rxfh_fields = virtnet_set_hashflow,
5610 	.get_rx_ring_count = virtnet_get_rx_ring_count,
5611 };
5612 
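/* Per-queue statistics for the netdev queue-stats API. Receive queue i is
 * backed by virtqueue 2 * i and send queue i by virtqueue 2 * i + 1, which
 * is why the hardware stats below are fetched with i * 2 and i * 2 + 1.
 */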
5613 static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
5614 				       struct netdev_queue_stats_rx *stats)
5615 {
5616 	struct virtnet_info *vi = netdev_priv(dev);
5617 	struct receive_queue *rq = &vi->rq[i];
5618 	struct virtnet_stats_ctx ctx = {0};
5619 
5620 	virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
5621 
5622 	virtnet_get_hw_stats(vi, &ctx, i * 2);
5623 	virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0);
5624 }
5625 
5626 static void virtnet_get_queue_stats_tx(struct net_device *dev, int i,
5627 				       struct netdev_queue_stats_tx *stats)
5628 {
5629 	struct virtnet_info *vi = netdev_priv(dev);
5630 	struct send_queue *sq = &vi->sq[i];
5631 	struct virtnet_stats_ctx ctx = {0};
5632 
5633 	virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
5634 
5635 	virtnet_get_hw_stats(vi, &ctx, i * 2 + 1);
5636 	virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0);
5637 }
5638 
5639 static void virtnet_get_base_stats(struct net_device *dev,
5640 				   struct netdev_queue_stats_rx *rx,
5641 				   struct netdev_queue_stats_tx *tx)
5642 {
5643 	struct virtnet_info *vi = netdev_priv(dev);
5644 
5645 	/* The queue stats of the virtio-net device are never reset, so the
5646 	 * base stats reported here are all zero.
5647 	 */
5648 	rx->bytes = 0;
5649 	rx->packets = 0;
5650 
5651 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
5652 		rx->hw_drops = 0;
5653 		rx->hw_drop_overruns = 0;
5654 	}
5655 
5656 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
5657 		rx->csum_unnecessary = 0;
5658 		rx->csum_none = 0;
5659 		rx->csum_bad = 0;
5660 	}
5661 
5662 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
5663 		rx->hw_gro_packets = 0;
5664 		rx->hw_gro_bytes = 0;
5665 		rx->hw_gro_wire_packets = 0;
5666 		rx->hw_gro_wire_bytes = 0;
5667 	}
5668 
5669 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED)
5670 		rx->hw_drop_ratelimits = 0;
5671 
5672 	tx->bytes = 0;
5673 	tx->packets = 0;
5674 	tx->stop = 0;
5675 	tx->wake = 0;
5676 
5677 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
5678 		tx->hw_drops = 0;
5679 		tx->hw_drop_errors = 0;
5680 	}
5681 
5682 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
5683 		tx->csum_none = 0;
5684 		tx->needs_csum = 0;
5685 	}
5686 
5687 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
5688 		tx->hw_gso_packets = 0;
5689 		tx->hw_gso_bytes = 0;
5690 		tx->hw_gso_wire_packets = 0;
5691 		tx->hw_gso_wire_bytes = 0;
5692 	}
5693 
5694 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
5695 		tx->hw_drop_ratelimits = 0;
5696 
5697 	netdev_stat_queue_sum(dev,
5698 			      dev->real_num_rx_queues, vi->max_queue_pairs, rx,
5699 			      dev->real_num_tx_queues, vi->max_queue_pairs, tx);
5700 }
5701 
5702 static const struct netdev_stat_ops virtnet_stat_ops = {
5703 	.get_queue_stats_rx	= virtnet_get_queue_stats_rx,
5704 	.get_queue_stats_tx	= virtnet_get_queue_stats_tx,
5705 	.get_base_stats		= virtnet_get_base_stats,
5706 };
5707 
5708 static void virtnet_freeze_down(struct virtio_device *vdev)
5709 {
5710 	struct virtnet_info *vi = vdev->priv;
5711 
5712 	/* Make sure no work handler is accessing the device */
5713 	flush_work(&vi->config_work);
5714 	disable_rx_mode_work(vi);
5715 	flush_work(&vi->rx_mode_work);
5716 
5717 	if (netif_running(vi->dev)) {
5718 		rtnl_lock();
5719 		virtnet_close(vi->dev);
5720 		rtnl_unlock();
5721 	}
5722 
5723 	netif_tx_lock_bh(vi->dev);
5724 	netif_device_detach(vi->dev);
5725 	netif_tx_unlock_bh(vi->dev);
5726 }
5727 
5728 static int init_vqs(struct virtnet_info *vi);
5729 
5730 static int virtnet_restore_up(struct virtio_device *vdev)
5731 {
5732 	struct virtnet_info *vi = vdev->priv;
5733 	int err;
5734 
5735 	err = init_vqs(vi);
5736 	if (err)
5737 		return err;
5738 
5739 	virtio_device_ready(vdev);
5740 
5741 	enable_rx_mode_work(vi);
5742 
5743 	if (netif_running(vi->dev)) {
5744 		rtnl_lock();
5745 		err = virtnet_open(vi->dev);
5746 		rtnl_unlock();
5747 		if (err)
5748 			return err;
5749 	}
5750 
5751 	netif_tx_lock_bh(vi->dev);
5752 	netif_device_attach(vi->dev);
5753 	netif_tx_unlock_bh(vi->dev);
5754 	return err;
5755 }
5756 
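/* Program the device's guest offload bitmap through the control virtqueue
 * (VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET). The bitmap is passed as a 64-bit
 * virtio-endian value in a heap buffer suitable for scatterlist use. The
 * helpers below clear the offloads when an XDP program is attached and
 * restore vi->guest_offloads when it is removed.
 */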
5757 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
5758 {
5759 	__virtio64 *_offloads __free(kfree) = NULL;
5760 	struct scatterlist sg;
5761 
5762 	_offloads = kzalloc_obj(*_offloads);
5763 	if (!_offloads)
5764 		return -ENOMEM;
5765 
5766 	*_offloads = cpu_to_virtio64(vi->vdev, offloads);
5767 
5768 	sg_init_one(&sg, _offloads, sizeof(*_offloads));
5769 
5770 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
5771 				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
5772 		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
5773 		return -EINVAL;
5774 	}
5775 
5776 	return 0;
5777 }
5778 
5779 static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
5780 {
5781 	u64 offloads = 0;
5782 
5783 	if (!vi->guest_offloads)
5784 		return 0;
5785 
5786 	return virtnet_set_guest_offloads(vi, offloads);
5787 }
5788 
5789 static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
5790 {
5791 	u64 offloads = vi->guest_offloads;
5792 
5793 	if (!vi->guest_offloads)
5794 		return 0;
5795 
5796 	return virtnet_set_guest_offloads(vi, offloads);
5797 }
5798 
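/* Bind (pool != NULL) or unbind (pool == NULL) an XSK buffer pool to a
 * receive queue: register the XSK rxq info with MEM_TYPE_XSK_BUFF_POOL,
 * pause the queue, reset the virtqueue (freeing any pending buffers), then
 * resume with a refill. If the reset fails, the queue falls back to
 * pool-less operation.
 */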
5799 static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
5800 				    struct xsk_buff_pool *pool)
5801 {
5802 	int err, qindex;
5803 
5804 	qindex = rq - vi->rq;
5805 
5806 	if (pool) {
5807 		err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
5808 		if (err < 0)
5809 			return err;
5810 
5811 		err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
5812 						 MEM_TYPE_XSK_BUFF_POOL, NULL);
5813 		if (err < 0)
5814 			goto unreg;
5815 
5816 		xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
5817 	}
5818 
5819 	virtnet_rx_pause(vi, rq);
5820 
5821 	err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL);
5822 	if (err) {
5823 		netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
5824 
5825 		pool = NULL;
5826 	}
5827 
5828 	rq->xsk_pool = pool;
5829 
5830 	virtnet_rx_resume(vi, rq, true);
5831 
5832 	if (pool)
5833 		return 0;
5834 
5835 unreg:
5836 	xdp_rxq_info_unreg(&rq->xsk_rxq_info);
5837 	return err;
5838 }
5839 
5840 static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
5841 				    struct send_queue *sq,
5842 				    struct xsk_buff_pool *pool)
5843 {
5844 	int err, qindex;
5845 
5846 	qindex = sq - vi->sq;
5847 
5848 	virtnet_tx_pause(vi, sq);
5849 
5850 	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf,
5851 			      virtnet_sq_free_unused_buf_done);
5852 	if (err) {
5853 		netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
5854 		pool = NULL;
5855 	}
5856 
5857 	sq->xsk_pool = pool;
5858 
5859 	virtnet_tx_resume(vi, sq);
5860 
5861 	return err;
5862 }
5863 
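/* Enable AF_XDP zero-copy on queue pair qid. Requirements checked below:
 * the pool headroom must hold the virtio-net header, big_packets-only mode
 * is not supported, and the rx and tx virtqueues must share one DMA device.
 * A single all-zero header is pre-mapped for tx, the pool is DMA-mapped,
 * and the rq and sq are bound in order with full unwind on failure.
 */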
5864 static int virtnet_xsk_pool_enable(struct net_device *dev,
5865 				   struct xsk_buff_pool *pool,
5866 				   u16 qid)
5867 {
5868 	struct virtnet_info *vi = netdev_priv(dev);
5869 	struct receive_queue *rq;
5870 	struct device *dma_dev;
5871 	struct send_queue *sq;
5872 	dma_addr_t hdr_dma;
5873 	int err, size;
5874 
5875 	if (vi->hdr_len > xsk_pool_get_headroom(pool))
5876 		return -EINVAL;
5877 
5878 	/* In big_packets mode, XDP cannot work, so there is no need to
5879 	 * initialize the rq's xsk state.
5880 	 */
5881 	if (vi->big_packets && !vi->mergeable_rx_bufs)
5882 		return -ENOENT;
5883 
5884 	if (qid >= vi->curr_queue_pairs)
5885 		return -EINVAL;
5886 
5887 	sq = &vi->sq[qid];
5888 	rq = &vi->rq[qid];
5889 
5890 	/* xsk assumes that tx and rx share the same dma device: AF_XDP may
5891 	 * receive into a buffer from the rx ring and reuse that same buffer
5892 	 * to transmit on the tx ring, so the dma dev of sq and rq must match.
5893 	 *
5894 	 * But vq->dma_dev allows each vq to have its own dma device, so
5895 	 * verify that the rq and the sq resolve to the same dma device.
5896 	 */
5897 	if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
5898 		return -EINVAL;
5899 
5900 	dma_dev = virtqueue_dma_dev(rq->vq);
5901 	if (!dma_dev)
5902 		return -EINVAL;
5903 
5904 	size = virtqueue_get_vring_size(rq->vq);
5905 
5906 	rq->xsk_buffs = kvzalloc_objs(*rq->xsk_buffs, size);
5907 	if (!rq->xsk_buffs)
5908 		return -ENOMEM;
5909 
5910 	hdr_dma = virtqueue_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
5911 					     DMA_TO_DEVICE, 0);
5912 	if (virtqueue_map_mapping_error(sq->vq, hdr_dma)) {
5913 		err = -ENOMEM;
5914 		goto err_free_buffs;
5915 	}
5916 
5917 	err = xsk_pool_dma_map(pool, dma_dev, 0);
5918 	if (err)
5919 		goto err_xsk_map;
5920 
5921 	err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
5922 	if (err)
5923 		goto err_rq;
5924 
5925 	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
5926 	if (err)
5927 		goto err_sq;
5928 
5929 	/* Tx offloads (such as tx csum) are not supported here, so the tx
5930 	 * virtnet hdr is always zero and all tx packets can share a single hdr.
5931 	 */
5932 	sq->xsk_hdr_dma_addr = hdr_dma;
5933 
5934 	return 0;
5935 
5936 err_sq:
5937 	virtnet_rq_bind_xsk_pool(vi, rq, NULL);
5938 err_rq:
5939 	xsk_pool_dma_unmap(pool, 0);
5940 err_xsk_map:
5941 	virtqueue_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
5942 				     DMA_TO_DEVICE, 0);
5943 err_free_buffs:
5944 	kvfree(rq->xsk_buffs);
5945 	return err;
5946 }
5947 
5948 static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
5949 {
5950 	struct virtnet_info *vi = netdev_priv(dev);
5951 	struct xsk_buff_pool *pool;
5952 	struct receive_queue *rq;
5953 	struct send_queue *sq;
5954 	int err;
5955 
5956 	if (qid >= vi->curr_queue_pairs)
5957 		return -EINVAL;
5958 
5959 	sq = &vi->sq[qid];
5960 	rq = &vi->rq[qid];
5961 
5962 	pool = rq->xsk_pool;
5963 
5964 	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
5965 	err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
5966 
5967 	xsk_pool_dma_unmap(pool, 0);
5968 
5969 	virtqueue_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
5970 				     vi->hdr_len, DMA_TO_DEVICE, 0);
5971 	kvfree(rq->xsk_buffs);
5972 
5973 	return err;
5974 }
5975 
5976 static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
5977 {
5978 	if (xdp->xsk.pool)
5979 		return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
5980 					       xdp->xsk.queue_id);
5981 	else
5982 		return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
5983 }
5984 
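/* Attach or detach an XDP program. Without VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
 * the program is refused while the device implements GRO_HW/CSUM offloads;
 * single-buffer programs also bound the MTU. Extra queue pairs are reserved
 * for XDP_TX when available, otherwise a slower locked tx mode is used.
 * Guest offloads are cleared on attach and restored on detach, and all rx
 * queues are paused around the program swap.
 */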
5985 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
5986 			   struct netlink_ext_ack *extack)
5987 {
5988 	unsigned int room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
5989 					   sizeof(struct skb_shared_info));
5990 	unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
5991 	struct virtnet_info *vi = netdev_priv(dev);
5992 	struct bpf_prog *old_prog;
5993 	u16 xdp_qp = 0, curr_qp;
5994 	int i, err;
5995 
5996 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
5997 	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
5998 	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
5999 	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
6000 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
6001 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
6002 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
6003 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
6004 		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
6005 		return -EOPNOTSUPP;
6006 	}
6007 
6008 	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
6009 		NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
6010 		return -EINVAL;
6011 	}
6012 
6013 	if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
6014 		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
6015 		netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
6016 		return -EINVAL;
6017 	}
6018 
6019 	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
6020 	if (prog)
6021 		xdp_qp = nr_cpu_ids;
6022 
6023 	/* XDP requires extra queues for XDP_TX */
6024 	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
6025 		netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
6026 				 curr_qp + xdp_qp, vi->max_queue_pairs);
6027 		xdp_qp = 0;
6028 	}
6029 
6030 	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
6031 	if (!prog && !old_prog)
6032 		return 0;
6033 
6034 	if (prog)
6035 		bpf_prog_add(prog, vi->max_queue_pairs - 1);
6036 
6037 	virtnet_rx_pause_all(vi);
6038 
6039 	/* Make sure NAPI is not using any XDP TX queues for RX. */
6040 	if (netif_running(dev)) {
6041 		for (i = 0; i < vi->max_queue_pairs; i++)
6042 			virtnet_napi_tx_disable(&vi->sq[i]);
6043 	}
6044 
6045 	if (!prog) {
6046 		for (i = 0; i < vi->max_queue_pairs; i++) {
6047 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
6048 			if (i == 0)
6049 				virtnet_restore_guest_offloads(vi);
6050 		}
6051 		synchronize_net();
6052 	}
6053 
6054 	err = virtnet_set_queues(vi, curr_qp + xdp_qp);
6055 	if (err)
6056 		goto err;
6057 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
6058 	vi->xdp_queue_pairs = xdp_qp;
6059 
6060 	if (prog) {
6061 		vi->xdp_enabled = true;
6062 		for (i = 0; i < vi->max_queue_pairs; i++) {
6063 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
6064 			if (i == 0 && !old_prog)
6065 				virtnet_clear_guest_offloads(vi);
6066 		}
6067 		if (!old_prog)
6068 			xdp_features_set_redirect_target(dev, true);
6069 	} else {
6070 		xdp_features_clear_redirect_target(dev);
6071 		vi->xdp_enabled = false;
6072 	}
6073 
6074 	virtnet_rx_resume_all(vi);
6075 	for (i = 0; i < vi->max_queue_pairs; i++) {
6076 		if (old_prog)
6077 			bpf_prog_put(old_prog);
6078 		if (netif_running(dev))
6079 			virtnet_napi_tx_enable(&vi->sq[i]);
6080 	}
6081 
6082 	return 0;
6083 
6084 err:
6085 	if (!prog) {
6086 		virtnet_clear_guest_offloads(vi);
6087 		for (i = 0; i < vi->max_queue_pairs; i++)
6088 			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
6089 	}
6090 
6091 	virtnet_rx_resume_all(vi);
6092 	if (netif_running(dev)) {
6093 		for (i = 0; i < vi->max_queue_pairs; i++)
6094 			virtnet_napi_tx_enable(&vi->sq[i]);
6095 	}
6096 	if (prog)
6097 		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
6098 	return err;
6099 }
6100 
6101 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
6102 {
6103 	switch (xdp->command) {
6104 	case XDP_SETUP_PROG:
6105 		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
6106 	case XDP_SETUP_XSK_POOL:
6107 		return virtnet_xsk_pool_setup(dev, xdp);
6108 	default:
6109 		return -EINVAL;
6110 	}
6111 }
6112 
6113 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
6114 				      size_t len)
6115 {
6116 	struct virtnet_info *vi = netdev_priv(dev);
6117 	int ret;
6118 
6119 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
6120 		return -EOPNOTSUPP;
6121 
6122 	ret = snprintf(buf, len, "sby");
6123 	if (ret >= len)
6124 		return -EOPNOTSUPP;
6125 
6126 	return 0;
6127 }
6128 
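/* Handle netdev feature toggles. NETIF_F_GRO_HW maps to the device guest
 * offloads: enabling it restores the capable offload set, disabling it
 * masks out GUEST_OFFLOAD_GRO_HW_MASK; either is rejected while XDP is
 * enabled. NETIF_F_RXHASH switches the RSS hash_types between the saved
 * set and VIRTIO_NET_HASH_REPORT_NONE and re-commits the RSS config.
 */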
6129 static int virtnet_set_features(struct net_device *dev,
6130 				netdev_features_t features)
6131 {
6132 	struct virtnet_info *vi = netdev_priv(dev);
6133 	u64 offloads;
6134 	int err;
6135 
6136 	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
6137 		if (vi->xdp_enabled)
6138 			return -EBUSY;
6139 
6140 		if (features & NETIF_F_GRO_HW)
6141 			offloads = vi->guest_offloads_capable;
6142 		else
6143 			offloads = vi->guest_offloads_capable &
6144 				   ~GUEST_OFFLOAD_GRO_HW_MASK;
6145 
6146 		err = virtnet_set_guest_offloads(vi, offloads);
6147 		if (err)
6148 			return err;
6149 		vi->guest_offloads = offloads;
6150 	}
6151 
6152 	if ((dev->features ^ features) & NETIF_F_RXHASH) {
6153 		if (features & NETIF_F_RXHASH)
6154 			vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
6155 		else
6156 			vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE);
6157 
6158 		if (!virtnet_commit_rss_command(vi))
6159 			return -EINVAL;
6160 	}
6161 
6162 	return 0;
6163 }
6164 
6165 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
6166 {
6167 	struct virtnet_info *priv = netdev_priv(dev);
6168 	struct send_queue *sq = &priv->sq[txqueue];
6169 	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
6170 
6171 	u64_stats_update_begin(&sq->stats.syncp);
6172 	u64_stats_inc(&sq->stats.tx_timeouts);
6173 	u64_stats_update_end(&sq->stats.syncp);
6174 
6175 	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
6176 		   txqueue, sq->name, sq->vq->index, sq->vq->name,
6177 		   jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
6178 }
6179 
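/* Set up net_dim based adaptive RX interrupt moderation: usec and packet
 * based profiles, EQE start mode, with virtnet_rx_dim_work() applying the
 * chosen profile. Each receive queue then gets the default DIM settings.
 */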
6180 static int virtnet_init_irq_moder(struct virtnet_info *vi)
6181 {
6182 	u8 profile_flags = 0, coal_flags = 0;
6183 	int ret, i;
6184 
6185 	profile_flags |= DIM_PROFILE_RX;
6186 	coal_flags |= DIM_COALESCE_USEC | DIM_COALESCE_PKTS;
6187 	ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags,
6188 				     DIM_CQ_PERIOD_MODE_START_FROM_EQE,
6189 				     0, virtnet_rx_dim_work, NULL);
6190 
6191 	if (ret)
6192 		return ret;
6193 
6194 	for (i = 0; i < vi->max_queue_pairs; i++)
6195 		net_dim_setting(vi->dev, &vi->rq[i].dim, false);
6196 
6197 	return 0;
6198 }
6199 
6200 static void virtnet_free_irq_moder(struct virtnet_info *vi)
6201 {
6202 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
6203 		return;
6204 
6205 	rtnl_lock();
6206 	net_dim_free_irq_moder(vi->dev);
6207 	rtnl_unlock();
6208 }
6209 
6210 static const struct net_device_ops virtnet_netdev = {
6211 	.ndo_open            = virtnet_open,
6212 	.ndo_stop   	     = virtnet_close,
6213 	.ndo_start_xmit      = start_xmit,
6214 	.ndo_validate_addr   = eth_validate_addr,
6215 	.ndo_set_mac_address = virtnet_set_mac_address,
6216 	.ndo_set_rx_mode     = virtnet_set_rx_mode,
6217 	.ndo_get_stats64     = virtnet_stats,
6218 	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
6219 	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
6220 	.ndo_bpf		= virtnet_xdp,
6221 	.ndo_xdp_xmit		= virtnet_xdp_xmit,
6222 	.ndo_xsk_wakeup         = virtnet_xsk_wakeup,
6223 	.ndo_features_check	= passthru_features_check,
6224 	.ndo_get_phys_port_name	= virtnet_get_phys_port_name,
6225 	.ndo_set_features	= virtnet_set_features,
6226 	.ndo_tx_timeout		= virtnet_tx_timeout,
6227 };
6228 
6229 static void virtnet_config_changed_work(struct work_struct *work)
6230 {
6231 	struct virtnet_info *vi =
6232 		container_of(work, struct virtnet_info, config_work);
6233 	u16 v;
6234 
6235 	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
6236 				 struct virtio_net_config, status, &v) < 0)
6237 		return;
6238 
6239 	if (v & VIRTIO_NET_S_ANNOUNCE) {
6240 		netdev_notify_peers(vi->dev);
6241 		virtnet_ack_link_announce(vi);
6242 	}
6243 
6244 	/* Ignore unknown (future) status bits */
6245 	v &= VIRTIO_NET_S_LINK_UP;
6246 
6247 	if (vi->status == v)
6248 		return;
6249 
6250 	vi->status = v;
6251 
6252 	if (vi->status & VIRTIO_NET_S_LINK_UP) {
6253 		virtnet_update_settings(vi);
6254 		netif_carrier_on(vi->dev);
6255 		netif_tx_wake_all_queues(vi->dev);
6256 	} else {
6257 		netif_carrier_off(vi->dev);
6258 		netif_tx_stop_all_queues(vi->dev);
6259 	}
6260 }
6261 
6262 static void virtnet_config_changed(struct virtio_device *vdev)
6263 {
6264 	struct virtnet_info *vi = vdev->priv;
6265 
6266 	schedule_work(&vi->config_work);
6267 }
6268 
6269 static void virtnet_free_queues(struct virtnet_info *vi)
6270 {
6271 	int i;
6272 
6273 	for (i = 0; i < vi->max_queue_pairs; i++) {
6274 		__netif_napi_del(&vi->rq[i].napi);
6275 		__netif_napi_del(&vi->sq[i].napi);
6276 	}
6277 
6278 	/* Since we called __netif_napi_del(), we must wait for an RCU grace
6279 	 * period before freeing vi->rq and vi->sq.
6280 	 */
6281 	synchronize_net();
6282 
6283 	kfree(vi->rq);
6284 	kfree(vi->sq);
6285 	kfree(vi->ctrl);
6286 }
6287 
6288 static void _free_receive_bufs(struct virtnet_info *vi)
6289 {
6290 	struct bpf_prog *old_prog;
6291 	int i;
6292 
6293 	for (i = 0; i < vi->max_queue_pairs; i++) {
6294 		while (vi->rq[i].pages)
6295 			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
6296 
6297 		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
6298 		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
6299 		if (old_prog)
6300 			bpf_prog_put(old_prog);
6301 	}
6302 }
6303 
6304 static void free_receive_bufs(struct virtnet_info *vi)
6305 {
6306 	rtnl_lock();
6307 	_free_receive_bufs(vi);
6308 	rtnl_unlock();
6309 }
6310 
6311 static void free_receive_page_frags(struct virtnet_info *vi)
6312 {
6313 	int i;
6314 	for (i = 0; i < vi->max_queue_pairs; i++)
6315 		if (vi->rq[i].alloc_frag.page) {
6316 			if (vi->rq[i].last_dma)
6317 				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
6318 			put_page(vi->rq[i].alloc_frag.page);
6319 		}
6320 }
6321 
6322 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
6323 {
6324 	struct virtnet_info *vi = vq->vdev->priv;
6325 	struct send_queue *sq;
6326 	int i = vq2txq(vq);
6327 
6328 	sq = &vi->sq[i];
6329 
6330 	switch (virtnet_xmit_ptr_unpack(&buf)) {
6331 	case VIRTNET_XMIT_TYPE_SKB:
6332 	case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
6333 		dev_kfree_skb(buf);
6334 		break;
6335 
6336 	case VIRTNET_XMIT_TYPE_XDP:
6337 		xdp_return_frame(buf);
6338 		break;
6339 
6340 	case VIRTNET_XMIT_TYPE_XSK:
6341 		xsk_tx_completed(sq->xsk_pool, 1);
6342 		break;
6343 	}
6344 }
6345 
6346 static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq)
6347 {
6348 	struct virtnet_info *vi = vq->vdev->priv;
6349 	int i = vq2txq(vq);
6350 
6351 	netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
6352 }
6353 
6354 static void free_unused_bufs(struct virtnet_info *vi)
6355 {
6356 	void *buf;
6357 	int i;
6358 
6359 	for (i = 0; i < vi->max_queue_pairs; i++) {
6360 		struct virtqueue *vq = vi->sq[i].vq;
6361 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
6362 			virtnet_sq_free_unused_buf(vq, buf);
6363 		cond_resched();
6364 	}
6365 
6366 	for (i = 0; i < vi->max_queue_pairs; i++) {
6367 		struct virtqueue *vq = vi->rq[i].vq;
6368 
6369 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
6370 			virtnet_rq_unmap_free_buf(vq, buf);
6371 		cond_resched();
6372 	}
6373 }
6374 
6375 static void virtnet_del_vqs(struct virtnet_info *vi)
6376 {
6377 	struct virtio_device *vdev = vi->vdev;
6378 
6379 	virtnet_clean_affinity(vi);
6380 
6381 	vdev->config->del_vqs(vdev);
6382 
6383 	virtnet_free_queues(vi);
6384 }
6385 
6386 /* How large should a single buffer be so a queue full of these can fit at
6387  * least one full packet?
6388  * Logic below assumes the mergeable buffer header is used.
6389  */
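/* For illustration (not normative), assuming hdr_len == 12
 * (virtio_net_hdr_mrg_rxbuf), a 256-entry ring and big_packets
 * (packet_len == IP_MAX_MTU == 65535):
 *   buf_len     = 12 + 14 + 4 + 65535      = 65565
 *   min_buf_len = DIV_ROUND_UP(65565, 256) = 257
 *   result      = max(257 - 12, 1518)      = 1518 (GOOD_PACKET_LEN)
 * so GOOD_PACKET_LEN dominates unless the ring is very small.
 */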
6390 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
6391 {
6392 	const unsigned int hdr_len = vi->hdr_len;
6393 	unsigned int rq_size = virtqueue_get_vring_size(vq);
6394 	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
6395 	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
6396 	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
6397 
6398 	return max(max(min_buf_len, hdr_len) - hdr_len,
6399 		   (unsigned int)GOOD_PACKET_LEN);
6400 }
6401 
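/* Discover the device's virtqueues. Assuming the usual virtio-net layout
 * (rxq2vq(i) == 2 * i, txq2vq(i) == 2 * i + 1, control vq last), a device
 * with two queue pairs and a control vq yields, for example:
 *   vqs[0] = "input.0", vqs[1] = "output.0",
 *   vqs[2] = "input.1", vqs[3] = "output.1", vqs[4] = "control"
 */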
6402 static int virtnet_find_vqs(struct virtnet_info *vi)
6403 {
6404 	struct virtqueue_info *vqs_info;
6405 	struct virtqueue **vqs;
6406 	int ret = -ENOMEM;
6407 	int total_vqs;
6408 	bool *ctx;
6409 	u16 i;
6410 
6411 	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
6412 	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
6413 	 * possible control vq.
6414 	 */
6415 	total_vqs = vi->max_queue_pairs * 2 +
6416 		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
6417 
6418 	/* Allocate space for find_vqs parameters */
6419 	vqs = kzalloc_objs(*vqs, total_vqs);
6420 	if (!vqs)
6421 		goto err_vq;
6422 	vqs_info = kzalloc_objs(*vqs_info, total_vqs);
6423 	if (!vqs_info)
6424 		goto err_vqs_info;
6425 	if (!vi->big_packets || vi->mergeable_rx_bufs) {
6426 		ctx = kzalloc_objs(*ctx, total_vqs);
6427 		if (!ctx)
6428 			goto err_ctx;
6429 	} else {
6430 		ctx = NULL;
6431 	}
6432 
6433 	/* Parameters for control virtqueue, if any */
6434 	if (vi->has_cvq) {
6435 		vqs_info[total_vqs - 1].name = "control";
6436 	}
6437 
6438 	/* Allocate/initialize parameters for send/receive virtqueues */
6439 	for (i = 0; i < vi->max_queue_pairs; i++) {
6440 		vqs_info[rxq2vq(i)].callback = skb_recv_done;
6441 		vqs_info[txq2vq(i)].callback = skb_xmit_done;
6442 		sprintf(vi->rq[i].name, "input.%u", i);
6443 		sprintf(vi->sq[i].name, "output.%u", i);
6444 		vqs_info[rxq2vq(i)].name = vi->rq[i].name;
6445 		vqs_info[txq2vq(i)].name = vi->sq[i].name;
6446 		if (ctx)
6447 			vqs_info[rxq2vq(i)].ctx = true;
6448 	}
6449 
6450 	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL);
6451 	if (ret)
6452 		goto err_find;
6453 
6454 	if (vi->has_cvq) {
6455 		vi->cvq = vqs[total_vqs - 1];
6456 		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
6457 			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6458 	}
6459 
6460 	for (i = 0; i < vi->max_queue_pairs; i++) {
6461 		vi->rq[i].vq = vqs[rxq2vq(i)];
6462 		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
6463 		vi->sq[i].vq = vqs[txq2vq(i)];
6464 	}
6465 
6466 	/* Success: ret == 0.  Fall through to free the temporary arrays. */
6467 
6469 err_find:
6470 	kfree(ctx);
6471 err_ctx:
6472 	kfree(vqs_info);
6473 err_vqs_info:
6474 	kfree(vqs);
6475 err_vq:
6476 	return ret;
6477 }
6478 
6479 static int virtnet_alloc_queues(struct virtnet_info *vi)
6480 {
6481 	int i;
6482 
6483 	if (vi->has_cvq) {
6484 		vi->ctrl = kzalloc_obj(*vi->ctrl);
6485 		if (!vi->ctrl)
6486 			goto err_ctrl;
6487 	} else {
6488 		vi->ctrl = NULL;
6489 	}
6490 	vi->sq = kzalloc_objs(*vi->sq, vi->max_queue_pairs);
6491 	if (!vi->sq)
6492 		goto err_sq;
6493 	vi->rq = kzalloc_objs(*vi->rq, vi->max_queue_pairs);
6494 	if (!vi->rq)
6495 		goto err_rq;
6496 
6497 	for (i = 0; i < vi->max_queue_pairs; i++) {
6498 		vi->rq[i].pages = NULL;
6499 		netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
6500 				      i);
6501 		vi->rq[i].napi.weight = napi_weight;
6502 		netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
6503 					 virtnet_poll_tx,
6504 					 napi_tx ? napi_weight : 0);
6505 
6506 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
6507 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
6508 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
6509 
6510 		u64_stats_init(&vi->rq[i].stats.syncp);
6511 		u64_stats_init(&vi->sq[i].stats.syncp);
6512 		mutex_init(&vi->rq[i].dim_lock);
6513 	}
6514 
6515 	return 0;
6516 
6517 err_rq:
6518 	kfree(vi->sq);
6519 err_sq:
6520 	kfree(vi->ctrl);
6521 err_ctrl:
6522 	return -ENOMEM;
6523 }
6524 
6525 static int init_vqs(struct virtnet_info *vi)
6526 {
6527 	int ret;
6528 
6529 	/* Allocate send & receive queues */
6530 	ret = virtnet_alloc_queues(vi);
6531 	if (ret)
6532 		goto err;
6533 
6534 	ret = virtnet_find_vqs(vi);
6535 	if (ret)
6536 		goto err_free;
6537 
6538 	cpus_read_lock();
6539 	virtnet_set_affinity(vi);
6540 	cpus_read_unlock();
6541 
6542 	return 0;
6543 
6544 err_free:
6545 	virtnet_free_queues(vi);
6546 err:
6547 	return ret;
6548 }
6549 
6550 #ifdef CONFIG_SYSFS
6551 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
6552 		char *buf)
6553 {
6554 	struct virtnet_info *vi = netdev_priv(queue->dev);
6555 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
6556 	unsigned int headroom = virtnet_get_headroom(vi);
6557 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
6558 	struct ewma_pkt_len *avg;
6559 
6560 	BUG_ON(queue_index >= vi->max_queue_pairs);
6561 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
6562 	return sprintf(buf, "%u\n",
6563 		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
6564 				       SKB_DATA_ALIGN(headroom + tailroom)));
6565 }
6566 
6567 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
6568 	__ATTR_RO(mergeable_rx_buffer_size);
6569 
6570 static struct attribute *virtio_net_mrg_rx_attrs[] = {
6571 	&mergeable_rx_buffer_size_attribute.attr,
6572 	NULL
6573 };
6574 
6575 static const struct attribute_group virtio_net_mrg_rx_group = {
6576 	.name = "virtio_net",
6577 	.attrs = virtio_net_mrg_rx_attrs
6578 };
6579 #endif
6580 
6581 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
6582 				    unsigned int fbit,
6583 				    const char *fname, const char *dname)
6584 {
6585 	if (!virtio_has_feature(vdev, fbit))
6586 		return false;
6587 
6588 	dev_err(&vdev->dev, "device advertises feature %s but not %s",
6589 		fname, dname);
6590 
6591 	return true;
6592 }
6593 
6594 #define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
6595 	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
6596 
6597 static bool virtnet_validate_features(struct virtio_device *vdev)
6598 {
6599 	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
6600 	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
6601 			     "VIRTIO_NET_F_CTRL_VQ") ||
6602 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
6603 			     "VIRTIO_NET_F_CTRL_VQ") ||
6604 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
6605 			     "VIRTIO_NET_F_CTRL_VQ") ||
6606 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
6607 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
6608 			     "VIRTIO_NET_F_CTRL_VQ") ||
6609 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
6610 			     "VIRTIO_NET_F_CTRL_VQ") ||
6611 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
6612 			     "VIRTIO_NET_F_CTRL_VQ") ||
6613 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
6614 			     "VIRTIO_NET_F_CTRL_VQ") ||
6615 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
6616 			     "VIRTIO_NET_F_CTRL_VQ"))) {
6617 		return false;
6618 	}
6619 
6620 	return true;
6621 }
6622 
6623 #define MIN_MTU ETH_MIN_MTU
6624 #define MAX_MTU ETH_MAX_MTU
6625 
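/* Early validation before feature negotiation completes: config space
 * access is mandatory, control-vq dependent features must come with
 * VIRTIO_NET_F_CTRL_VQ, an advertised MTU below ETH_MIN_MTU clears
 * VIRTIO_NET_F_MTU, and STANDBY without a device MAC is disabled.
 */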
6626 static int virtnet_validate(struct virtio_device *vdev)
6627 {
6628 	if (!vdev->config->get) {
6629 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
6630 			__func__);
6631 		return -EINVAL;
6632 	}
6633 
6634 	if (!virtnet_validate_features(vdev))
6635 		return -EINVAL;
6636 
6637 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
6638 		int mtu = virtio_cread16(vdev,
6639 					 offsetof(struct virtio_net_config,
6640 						  mtu));
6641 		if (mtu < MIN_MTU)
6642 			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
6643 	}
6644 
6645 	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
6646 	    !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
6647 		dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
6648 		__virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
6649 	}
6650 
6651 	return 0;
6652 }
6653 
6654 static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
6655 {
6656 	return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
6657 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
6658 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
6659 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
6660 		(virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
6661 		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
6662 }
6663 
6664 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
6665 {
6666 	bool guest_gso = virtnet_check_guest_gso(vi);
6667 
6668 	/* If the device can receive ANY guest GSO packets, allocate buffers
6669 	 * of the maximum packet size regardless of the MTU; otherwise limit
6670 	 * them to an MTU's worth of data only.
6671 	 */
6672 	if (mtu > ETH_DATA_LEN || guest_gso) {
6673 		vi->big_packets = true;
6674 		vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
6675 	}
6676 }
6677 
6678 #define VIRTIO_NET_HASH_REPORT_MAX_TABLE      10
6679 static enum xdp_rss_hash_type
6680 virtnet_xdp_rss_type[VIRTIO_NET_HASH_REPORT_MAX_TABLE] = {
6681 	[VIRTIO_NET_HASH_REPORT_NONE] = XDP_RSS_TYPE_NONE,
6682 	[VIRTIO_NET_HASH_REPORT_IPv4] = XDP_RSS_TYPE_L3_IPV4,
6683 	[VIRTIO_NET_HASH_REPORT_TCPv4] = XDP_RSS_TYPE_L4_IPV4_TCP,
6684 	[VIRTIO_NET_HASH_REPORT_UDPv4] = XDP_RSS_TYPE_L4_IPV4_UDP,
6685 	[VIRTIO_NET_HASH_REPORT_IPv6] = XDP_RSS_TYPE_L3_IPV6,
6686 	[VIRTIO_NET_HASH_REPORT_TCPv6] = XDP_RSS_TYPE_L4_IPV6_TCP,
6687 	[VIRTIO_NET_HASH_REPORT_UDPv6] = XDP_RSS_TYPE_L4_IPV6_UDP,
6688 	[VIRTIO_NET_HASH_REPORT_IPv6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
6689 	[VIRTIO_NET_HASH_REPORT_TCPv6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
6690 	[VIRTIO_NET_HASH_REPORT_UDPv6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX
6691 };
6692 
6693 static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
6694 			       enum xdp_rss_hash_type *rss_type)
6695 {
6696 	const struct xdp_buff *xdp = (void *)_ctx;
6697 	struct virtio_net_hdr_v1_hash *hdr_hash;
6698 	struct virtnet_info *vi;
6699 	u16 hash_report;
6700 
6701 	if (!(xdp->rxq->dev->features & NETIF_F_RXHASH))
6702 		return -ENODATA;
6703 
6704 	vi = netdev_priv(xdp->rxq->dev);
6705 	hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len);
6706 	hash_report = __le16_to_cpu(hdr_hash->hash_report);
6707 
6708 	if (hash_report >= VIRTIO_NET_HASH_REPORT_MAX_TABLE)
6709 		hash_report = VIRTIO_NET_HASH_REPORT_NONE;
6710 
6711 	*rss_type = virtnet_xdp_rss_type[hash_report];
6712 	*hash = virtio_net_hash_value(hdr_hash);
6713 	return 0;
6714 }
6715 
6716 static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
6717 	.xmo_rx_hash			= virtnet_xdp_rx_hash,
6718 };
6719 
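/* Device probe: read the queue-pair count from config space, allocate the
 * netdev, derive offload/feature flags from the negotiated feature bits,
 * pick the header length, create the virtqueues, then register the netdev
 * and mark the device ready under rtnl before committing RSS, queue count,
 * MAC and stats-capability control commands.
 */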
6720 static int virtnet_probe(struct virtio_device *vdev)
6721 {
6722 	int i, err = -ENOMEM;
6723 	struct net_device *dev;
6724 	struct virtnet_info *vi;
6725 	u16 max_queue_pairs;
6726 	int mtu = 0;
6727 
6728 	/* Find if host supports multiqueue/rss virtio_net device */
6729 	max_queue_pairs = 1;
6730 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
6731 		max_queue_pairs =
6732 		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
6733 
6734 	/* We need at least 2 queues */
6735 	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
6736 	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
6737 	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
6738 		max_queue_pairs = 1;
6739 
6740 	/* Allocate ourselves a network device with room for our info */
6741 	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
6742 	if (!dev)
6743 		return -ENOMEM;
6744 
6745 	/* Set up network device as normal. */
6746 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
6747 			   IFF_TX_SKB_NO_LINEAR;
6748 	dev->netdev_ops = &virtnet_netdev;
6749 	dev->stat_ops = &virtnet_stat_ops;
6750 	dev->features = NETIF_F_HIGHDMA;
6751 
6752 	dev->ethtool_ops = &virtnet_ethtool_ops;
6753 	SET_NETDEV_DEV(dev, &vdev->dev);
6754 
6755 	/* Do we support "hardware" checksums? */
6756 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
6757 		/* This opens up the world of extra features. */
6758 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6759 		if (csum)
6760 			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6761 
6762 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
6763 			dev->hw_features |= NETIF_F_TSO
6764 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
6765 		}
6766 		/* Individual feature bits: what can host handle? */
6767 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
6768 			dev->hw_features |= NETIF_F_TSO;
6769 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
6770 			dev->hw_features |= NETIF_F_TSO6;
6771 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
6772 			dev->hw_features |= NETIF_F_TSO_ECN;
6773 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
6774 			dev->hw_features |= NETIF_F_GSO_UDP_L4;
6775 
6776 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO)) {
6777 			dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
6778 			dev->hw_enc_features = dev->hw_features;
6779 		}
6780 		if (dev->hw_features & NETIF_F_GSO_UDP_TUNNEL &&
6781 		    virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM)) {
6782 			dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
6783 			dev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
6784 		}
6785 
6786 		dev->features |= NETIF_F_GSO_ROBUST;
6787 
6788 		if (gso)
6789 			dev->features |= dev->hw_features;
6790 		/* (!csum && gso) case will be fixed by register_netdev() */
6791 	}
6792 
6793 	/* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
6794 	 * need to calculate checksums for partially checksummed packets,
6795 	 * as they're considered valid by the upper layer.
6796 	 * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
6797 	 * receives fully checksummed packets. The device may assist in
6798 	 * validating these packets' checksums, so the driver won't have to.
6799 	 */
6800 	dev->features |= NETIF_F_RXCSUM;
6801 
6802 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
6803 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
6804 		dev->features |= NETIF_F_GRO_HW;
6805 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
6806 		dev->hw_features |= NETIF_F_GRO_HW;
6807 
6808 	dev->vlan_features = dev->features;
6809 	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
6810 		NETDEV_XDP_ACT_XSK_ZEROCOPY;
6811 
6812 	/* MTU range: 68 - 65535 */
6813 	dev->min_mtu = MIN_MTU;
6814 	dev->max_mtu = MAX_MTU;
6815 
6816 	/* Configuration may specify what MAC to use.  Otherwise random. */
6817 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
6818 		u8 addr[ETH_ALEN];
6819 
6820 		virtio_cread_bytes(vdev,
6821 				   offsetof(struct virtio_net_config, mac),
6822 				   addr, ETH_ALEN);
6823 		eth_hw_addr_set(dev, addr);
6824 	} else {
6825 		eth_hw_addr_random(dev);
6826 		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
6827 			 dev->dev_addr);
6828 	}
6829 
6830 	/* Set up our device-specific information */
6831 	vi = netdev_priv(dev);
6832 	vi->dev = dev;
6833 	vi->vdev = vdev;
6834 	vdev->priv = vi;
6835 
6836 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
6837 	INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
6838 
6839 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
6840 		vi->mergeable_rx_bufs = true;
6841 		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
6842 	}
6843 
6844 	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
6845 		vi->has_rss_hash_report = true;
6846 
6847 	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
6848 		vi->has_rss = true;
6849 
6850 		vi->rss_indir_table_size =
6851 			virtio_cread16(vdev, offsetof(struct virtio_net_config,
6852 				rss_max_indirection_table_length));
6853 	}
6854 	vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
6855 	if (!vi->rss_hdr) {
6856 		err = -ENOMEM;
6857 		goto free;
6858 	}
6859 
6860 	if (vi->has_rss || vi->has_rss_hash_report) {
6861 		vi->rss_key_size =
6862 			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
6863 		if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
6864 			dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n",
6865 				vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE);
6866 			err = -EINVAL;
6867 			goto free;
6868 		}
6869 
6870 		vi->rss_hash_types_supported =
6871 		    virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
6872 		vi->rss_hash_types_supported &=
6873 				~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
6874 				  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
6875 				  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
6876 
6877 		dev->hw_features |= NETIF_F_RXHASH;
6878 		dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops;
6879 	}
6880 
6881 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO) ||
6882 	    virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO))
6883 		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash_tunnel);
6884 	else if (vi->has_rss_hash_report)
6885 		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
6886 	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
6887 		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
6888 		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
6889 	else
6890 		vi->hdr_len = sizeof(struct virtio_net_hdr);
6891 
6892 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM))
6893 		vi->rx_tnl_csum = true;
6894 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO))
6895 		vi->rx_tnl = true;
6896 	if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO))
6897 		vi->tx_tnl = true;
6898 
6899 	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
6900 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
6901 		vi->any_header_sg = true;
6902 
6903 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
6904 		vi->has_cvq = true;
6905 
6906 	mutex_init(&vi->cvq_lock);
6907 
6908 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
6909 		mtu = virtio_cread16(vdev,
6910 				     offsetof(struct virtio_net_config,
6911 					      mtu));
6912 		if (mtu < dev->min_mtu) {
6913 			/* Should never trigger: MTU was previously validated
6914 			 * in virtnet_validate.
6915 			 */
6916 			dev_err(&vdev->dev,
6917 				"device MTU appears to have changed, it is now %d < %d",
6918 				mtu, dev->min_mtu);
6919 			err = -EINVAL;
6920 			goto free;
6921 		}
6922 
6923 		dev->mtu = mtu;
6924 		dev->max_mtu = mtu;
6925 	}
6926 
6927 	virtnet_set_big_packets(vi, mtu);
6928 
6929 	if (vi->any_header_sg)
6930 		dev->needed_headroom = vi->hdr_len;
6931 
6932 	/* Enable multiqueue by default */
6933 	if (num_online_cpus() >= max_queue_pairs)
6934 		vi->curr_queue_pairs = max_queue_pairs;
6935 	else
6936 		vi->curr_queue_pairs = num_online_cpus();
6937 	vi->max_queue_pairs = max_queue_pairs;
6938 
6939 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
6940 	err = init_vqs(vi);
6941 	if (err)
6942 		goto free;
6943 
6944 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
6945 		vi->intr_coal_rx.max_usecs = 0;
6946 		vi->intr_coal_tx.max_usecs = 0;
6947 		vi->intr_coal_rx.max_packets = 0;
6948 
6949 		/* Keep the default values of the coalescing parameters
6950 		 * aligned with the default napi_tx state.
6951 		 */
6952 		if (vi->sq[0].napi.weight)
6953 			vi->intr_coal_tx.max_packets = 1;
6954 		else
6955 			vi->intr_coal_tx.max_packets = 0;
6956 	}
6957 
6958 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
6959 		/* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
6960 		for (i = 0; i < vi->max_queue_pairs; i++)
6961 			if (vi->sq[i].napi.weight)
6962 				vi->sq[i].intr_coal.max_packets = 1;
6963 
6964 		err = virtnet_init_irq_moder(vi);
6965 		if (err)
6966 			goto free;
6967 	}
6968 
6969 #ifdef CONFIG_SYSFS
6970 	if (vi->mergeable_rx_bufs)
6971 		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
6972 #endif
6973 	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
6974 	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
6975 
6976 	virtnet_init_settings(dev);
6977 
6978 	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
6979 		vi->failover = net_failover_create(vi->dev);
6980 		if (IS_ERR(vi->failover)) {
6981 			err = PTR_ERR(vi->failover);
6982 			goto free_vqs;
6983 		}
6984 	}
6985 
6986 	if (vi->has_rss || vi->has_rss_hash_report)
6987 		virtnet_init_default_rss(vi);
6988 
6989 	enable_rx_mode_work(vi);
6990 
6991 	/* serialize netdev register + virtio_device_ready() with ndo_open() */
6992 	rtnl_lock();
6993 
6994 	err = register_netdevice(dev);
6995 	if (err) {
6996 		pr_debug("virtio_net: registering device failed\n");
6997 		rtnl_unlock();
6998 		goto free_failover;
6999 	}
7000 
7001 	/* Disable config change notification until ndo_open. */
7002 	virtio_config_driver_disable(vi->vdev);
7003 
7004 	virtio_device_ready(vdev);
7005 
7006 	if (vi->has_rss || vi->has_rss_hash_report) {
7007 		if (!virtnet_commit_rss_command(vi)) {
7008 			dev_warn(&vdev->dev, "RSS disabled because committing failed.\n");
7009 			dev->hw_features &= ~NETIF_F_RXHASH;
7010 			vi->has_rss_hash_report = false;
7011 			vi->has_rss = false;
7012 		}
7013 	}
7014 
7015 	virtnet_set_queues(vi, vi->curr_queue_pairs);
7016 
7017 	/* A random MAC address has been assigned; notify the device.
7018 	 * Probe is not failed when VIRTIO_NET_F_CTRL_MAC_ADDR is absent,
7019 	 * because many devices work fine without the MAC being set explicitly.
7020 	 */
7021 	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
7022 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
7023 		struct scatterlist sg;
7024 
7025 		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
7026 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
7027 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
7028 			pr_debug("virtio_net: setting MAC address failed\n");
7029 			rtnl_unlock();
7030 			err = -EINVAL;
7031 			goto free_unregister_netdev;
7032 		}
7033 	}
7034 
7035 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) {
7036 		struct virtio_net_stats_capabilities *stats_cap  __free(kfree) = NULL;
7037 		struct scatterlist sg;
7038 		__le64 v;
7039 
7040 		stats_cap = kzalloc_obj(*stats_cap);
7041 		if (!stats_cap) {
7042 			rtnl_unlock();
7043 			err = -ENOMEM;
7044 			goto free_unregister_netdev;
7045 		}
7046 
7047 		sg_init_one(&sg, stats_cap, sizeof(*stats_cap));
7048 
7049 		if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
7050 						VIRTIO_NET_CTRL_STATS_QUERY,
7051 						NULL, &sg)) {
7052 			pr_debug("virtio_net: fail to get stats capability\n");
7053 			rtnl_unlock();
7054 			err = -EINVAL;
7055 			goto free_unregister_netdev;
7056 		}
7057 
7058 		v = stats_cap->supported_stats_types[0];
7059 		vi->device_stats_cap = le64_to_cpu(v);
7060 	}
7061 
7062 	/* Assume link up if the device can't report link status,
7063 	 * otherwise get link status from config. */
7064 	netif_carrier_off(dev);
7065 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
7066 		virtio_config_changed(vi->vdev);
7067 	} else {
7068 		vi->status = VIRTIO_NET_S_LINK_UP;
7069 		virtnet_update_settings(vi);
7070 		netif_carrier_on(dev);
7071 	}
7072 
7073 	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) {
7074 		unsigned int fbit;
7075 
7076 		fbit = virtio_offload_to_feature(guest_offloads[i]);
7077 		if (virtio_has_feature(vi->vdev, fbit))
7078 			set_bit(guest_offloads[i], &vi->guest_offloads);
7079 	}
7080 	vi->guest_offloads_capable = vi->guest_offloads;
7081 
7082 	rtnl_unlock();
7083 
7084 	err = virtnet_cpu_notif_add(vi);
7085 	if (err) {
7086 		pr_debug("virtio_net: registering cpu notifier failed\n");
7087 		goto free_unregister_netdev;
7088 	}
7089 
7090 	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
7091 		 dev->name, max_queue_pairs);
7092 
7093 	return 0;
7094 
7095 free_unregister_netdev:
7096 	unregister_netdev(dev);
7097 free_failover:
7098 	net_failover_destroy(vi->failover);
7099 free_vqs:
7100 	virtio_reset_device(vdev);
7101 	free_receive_page_frags(vi);
7102 	virtnet_del_vqs(vi);
7103 free:
7104 	free_netdev(dev);
7105 	return err;
7106 }
7107 
7108 static void remove_vq_common(struct virtnet_info *vi)
7109 {
7110 	int i;
7111 
7112 	virtio_reset_device(vi->vdev);
7113 
7114 	/* Free unused buffers in both send and recv, if any. */
7115 	free_unused_bufs(vi);
7116 
7117 	/*
7118 	 * As a rule of thumb, netdev_tx_reset_queue() should follow any
7119 	 * skb freeing that is not followed by netdev_tx_completed_queue().
7120 	 */
7121 	for (i = 0; i < vi->max_queue_pairs; i++)
7122 		netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
7123 
7124 	free_receive_bufs(vi);
7125 
7126 	free_receive_page_frags(vi);
7127 
7128 	virtnet_del_vqs(vi);
7129 }
7130 
7131 static void virtnet_remove(struct virtio_device *vdev)
7132 {
7133 	struct virtnet_info *vi = vdev->priv;
7134 
7135 	virtnet_cpu_notif_remove(vi);
7136 
7137 	/* Make sure no work handler is accessing the device. */
7138 	flush_work(&vi->config_work);
7139 	disable_rx_mode_work(vi);
7140 	flush_work(&vi->rx_mode_work);
7141 
7142 	virtnet_free_irq_moder(vi);
7143 
7144 	unregister_netdev(vi->dev);
7145 
7146 	net_failover_destroy(vi->failover);
7147 
7148 	remove_vq_common(vi);
7149 
7150 	free_netdev(vi->dev);
7151 }
7152 
7153 static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
7154 {
7155 	struct virtnet_info *vi = vdev->priv;
7156 
7157 	virtnet_cpu_notif_remove(vi);
7158 	virtnet_freeze_down(vdev);
7159 	remove_vq_common(vi);
7160 
7161 	return 0;
7162 }
7163 
7164 static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
7165 {
7166 	struct virtnet_info *vi = vdev->priv;
7167 	int err;
7168 
7169 	err = virtnet_restore_up(vdev);
7170 	if (err)
7171 		return err;
7172 	virtnet_set_queues(vi, vi->curr_queue_pairs);
7173 
7174 	err = virtnet_cpu_notif_add(vi);
7175 	if (err) {
7176 		virtnet_freeze_down(vdev);
7177 		remove_vq_common(vi);
7178 		return err;
7179 	}
7180 
7181 	return 0;
7182 }
7183 
7184 static struct virtio_device_id id_table[] = {
7185 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
7186 	{ 0 },
7187 };
7188 
7189 #define VIRTNET_FEATURES \
7190 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
7191 	VIRTIO_NET_F_MAC, \
7192 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
7193 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
7194 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
7195 	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
7196 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
7197 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
7198 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
7199 	VIRTIO_NET_F_CTRL_MAC_ADDR, \
7200 	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
7201 	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
7202 	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
7203 	VIRTIO_NET_F_VQ_NOTF_COAL, \
7204 	VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
7205 
7206 static unsigned int features[] = {
7207 	VIRTNET_FEATURES,
7208 	VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO,
7209 	VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM,
7210 	VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO,
7211 	VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM,
7212 };
7213 
7214 static unsigned int features_legacy[] = {
7215 	VIRTNET_FEATURES,
7216 	VIRTIO_NET_F_GSO,
7217 	VIRTIO_F_ANY_LAYOUT,
7218 };
7219 
7220 static struct virtio_driver virtio_net_driver = {
7221 	.feature_table = features,
7222 	.feature_table_size = ARRAY_SIZE(features),
7223 	.feature_table_legacy = features_legacy,
7224 	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
7225 	.driver.name =	KBUILD_MODNAME,
7226 	.id_table =	id_table,
7227 	.validate =	virtnet_validate,
7228 	.probe =	virtnet_probe,
7229 	.remove =	virtnet_remove,
7230 	.config_changed = virtnet_config_changed,
7231 #ifdef CONFIG_PM_SLEEP
7232 	.freeze =	virtnet_freeze,
7233 	.restore =	virtnet_restore,
7234 #endif
7235 };
7236 
7237 static __init int virtio_net_driver_init(void)
7238 {
7239 	int ret;
7240 
7241 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
7242 				      virtnet_cpu_online,
7243 				      virtnet_cpu_down_prep);
7244 	if (ret < 0)
7245 		goto out;
7246 	virtionet_online = ret;
7247 	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
7248 				      NULL, virtnet_cpu_dead);
7249 	if (ret)
7250 		goto err_dead;
7251 	ret = register_virtio_driver(&virtio_net_driver);
7252 	if (ret)
7253 		goto err_virtio;
7254 	return 0;
7255 err_virtio:
7256 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
7257 err_dead:
7258 	cpuhp_remove_multi_state(virtionet_online);
7259 out:
7260 	return ret;
7261 }
7262 module_init(virtio_net_driver_init);
7263 
7264 static __exit void virtio_net_driver_exit(void)
7265 {
7266 	unregister_virtio_driver(&virtio_net_driver);
7267 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
7268 	cpuhp_remove_multi_state(virtionet_online);
7269 }
7270 module_exit(virtio_net_driver_exit);
7271 
7272 MODULE_DEVICE_TABLE(virtio, id_table);
7273 MODULE_DESCRIPTION("Virtio network driver");
7274 MODULE_LICENSE("GPL");
7275