xref: /linux/drivers/net/ethernet/google/gve/gve_tx_dqo.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #include "gve.h"
8 #include "gve_adminq.h"
9 #include "gve_utils.h"
10 #include "gve_dqo.h"
11 #include <net/ip.h>
12 #include <linux/tcp.h>
13 #include <linux/slab.h>
14 #include <linux/skbuff.h>
15 
16 /* Returns true if 'count' TX QPL buffers are available. */
17 static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)
18 {
19 	int num_avail;
20 
21 	if (!tx->dqo.qpl)
22 		return true;
23 
24 	num_avail = tx->dqo.num_tx_qpl_bufs -
25 		(tx->dqo_tx.alloc_tx_qpl_buf_cnt -
26 		 tx->dqo_tx.free_tx_qpl_buf_cnt);
27 
28 	if (count <= num_avail)
29 		return true;
30 
31 	/* Update cached value from dqo_compl. */
32 	tx->dqo_tx.free_tx_qpl_buf_cnt =
33 		atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt);
34 
35 	num_avail = tx->dqo.num_tx_qpl_bufs -
36 		(tx->dqo_tx.alloc_tx_qpl_buf_cnt -
37 		 tx->dqo_tx.free_tx_qpl_buf_cnt);
38 
39 	return count <= num_avail;
40 }
41 
42 static s16
43 gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx)
44 {
45 	s16 index;
46 
47 	index = tx->dqo_tx.free_tx_qpl_buf_head;
48 
49 	/* No TX buffers available, try to steal the list from the
50 	 * completion handler.
51 	 */
52 	if (unlikely(index == -1)) {
53 		tx->dqo_tx.free_tx_qpl_buf_head =
54 			atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1);
55 		index = tx->dqo_tx.free_tx_qpl_buf_head;
56 
57 		if (unlikely(index == -1))
58 			return index;
59 	}
60 
61 	/* Remove TX buf from free list */
62 	tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index];
63 
64 	return index;
65 }
66 
67 static void
68 gve_free_tx_qpl_bufs(struct gve_tx_ring *tx,
69 		     struct gve_tx_pending_packet_dqo *pkt)
70 {
71 	s16 index;
72 	int i;
73 
74 	if (!pkt->num_bufs)
75 		return;
76 
77 	index = pkt->tx_qpl_buf_ids[0];
78 	/* Create a linked list of buffers to be added to the free list */
79 	for (i = 1; i < pkt->num_bufs; i++) {
80 		tx->dqo.tx_qpl_buf_next[index] = pkt->tx_qpl_buf_ids[i];
81 		index = pkt->tx_qpl_buf_ids[i];
82 	}
83 
84 	while (true) {
85 		s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head);
86 
87 		tx->dqo.tx_qpl_buf_next[index] = old_head;
88 		if (atomic_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head,
89 				   old_head,
90 				   pkt->tx_qpl_buf_ids[0]) == old_head) {
91 			break;
92 		}
93 	}
94 
95 	atomic_add(pkt->num_bufs, &tx->dqo_compl.free_tx_qpl_buf_cnt);
96 	pkt->num_bufs = 0;
97 }
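
/* The QPL buffer free list is split between the TX path and the completion
 * path to avoid locking: the TX path pops from its private
 * free_tx_qpl_buf_head, and when that runs empty gve_alloc_tx_qpl_buf()
 * steals the completion path's list with an atomic_xchg(). The completion
 * path pushes freed chains onto its own head with the cmpxchg() loop above.
 * Illustrative sequence (indices hypothetical): the TX list holds 3 -> 7 -> -1
 * and the completion list holds 5 -> -1; once the TX path has drained 3 and 7
 * it xchg()s the completion head, obtaining 5 and leaving that list empty.
 */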
98 
99 /* Returns true if a gve_tx_pending_packet_dqo object is available. */
100 static bool gve_has_pending_packet(struct gve_tx_ring *tx)
101 {
102 	/* Check TX path's list. */
103 	if (tx->dqo_tx.free_pending_packets != -1)
104 		return true;
105 
106 	/* Check completion handler's list. */
107 	if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1)
108 		return true;
109 
110 	return false;
111 }
112 
113 static struct gve_tx_pending_packet_dqo *
114 gve_alloc_pending_packet(struct gve_tx_ring *tx)
115 {
116 	struct gve_tx_pending_packet_dqo *pending_packet;
117 	s16 index;
118 
119 	index = tx->dqo_tx.free_pending_packets;
120 
121 	/* No pending_packets available, try to steal the list from the
122 	 * completion handler.
123 	 */
124 	if (unlikely(index == -1)) {
125 		tx->dqo_tx.free_pending_packets =
126 			atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
127 		index = tx->dqo_tx.free_pending_packets;
128 
129 		if (unlikely(index == -1))
130 			return NULL;
131 	}
132 
133 	pending_packet = &tx->dqo.pending_packets[index];
134 
135 	/* Remove pending_packet from free list */
136 	tx->dqo_tx.free_pending_packets = pending_packet->next;
137 	pending_packet->state = GVE_PACKET_STATE_PENDING_DATA_COMPL;
138 
139 	return pending_packet;
140 }
141 
142 static void
143 gve_free_pending_packet(struct gve_tx_ring *tx,
144 			struct gve_tx_pending_packet_dqo *pending_packet)
145 {
146 	s16 index = pending_packet - tx->dqo.pending_packets;
147 
148 	pending_packet->state = GVE_PACKET_STATE_UNALLOCATED;
149 	while (true) {
150 		s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_pending_packets);
151 
152 		pending_packet->next = old_head;
153 		if (atomic_cmpxchg(&tx->dqo_compl.free_pending_packets,
154 				   old_head, index) == old_head) {
155 			break;
156 		}
157 	}
158 }
159 
160 /* gve_tx_clean_pending_packets - Cleans up all pending tx requests and buffers.
161  */
162 static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
163 {
164 	int i;
165 
166 	for (i = 0; i < tx->dqo.num_pending_packets; i++) {
167 		struct gve_tx_pending_packet_dqo *cur_state =
168 			&tx->dqo.pending_packets[i];
169 		int j;
170 
171 		for (j = 0; j < cur_state->num_bufs; j++) {
172 			if (j == 0) {
173 				dma_unmap_single(tx->dev,
174 					dma_unmap_addr(cur_state, dma[j]),
175 					dma_unmap_len(cur_state, len[j]),
176 					DMA_TO_DEVICE);
177 			} else {
178 				dma_unmap_page(tx->dev,
179 					dma_unmap_addr(cur_state, dma[j]),
180 					dma_unmap_len(cur_state, len[j]),
181 					DMA_TO_DEVICE);
182 			}
183 		}
184 		if (cur_state->skb) {
185 			dev_consume_skb_any(cur_state->skb);
186 			cur_state->skb = NULL;
187 		}
188 	}
189 }
190 
191 void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
192 {
193 	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
194 	struct gve_tx_ring *tx = &priv->tx[idx];
195 
196 	if (!gve_tx_was_added_to_block(priv, idx))
197 		return;
198 
199 	gve_remove_napi(priv, ntfy_idx);
200 	gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
201 	netdev_tx_reset_queue(tx->netdev_txq);
202 	gve_tx_clean_pending_packets(tx);
203 	gve_tx_remove_from_block(priv, idx);
204 }
205 
206 static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
207 				 struct gve_tx_alloc_rings_cfg *cfg)
208 {
209 	struct device *hdev = &priv->pdev->dev;
210 	int idx = tx->q_num;
211 	size_t bytes;
212 	u32 qpl_id;
213 
214 	if (tx->q_resources) {
215 		dma_free_coherent(hdev, sizeof(*tx->q_resources),
216 				  tx->q_resources, tx->q_resources_bus);
217 		tx->q_resources = NULL;
218 	}
219 
220 	if (tx->dqo.compl_ring) {
221 		bytes = sizeof(tx->dqo.compl_ring[0]) *
222 			(tx->dqo.complq_mask + 1);
223 		dma_free_coherent(hdev, bytes, tx->dqo.compl_ring,
224 				  tx->complq_bus_dqo);
225 		tx->dqo.compl_ring = NULL;
226 	}
227 
228 	if (tx->dqo.tx_ring) {
229 		bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
230 		dma_free_coherent(hdev, bytes, tx->dqo.tx_ring, tx->bus);
231 		tx->dqo.tx_ring = NULL;
232 	}
233 
234 	kvfree(tx->dqo.pending_packets);
235 	tx->dqo.pending_packets = NULL;
236 
237 	kvfree(tx->dqo.tx_qpl_buf_next);
238 	tx->dqo.tx_qpl_buf_next = NULL;
239 
240 	if (tx->dqo.qpl) {
241 		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
242 		gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id);
243 		tx->dqo.qpl = NULL;
244 	}
245 
246 	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
247 }
248 
249 static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx)
250 {
251 	int num_tx_qpl_bufs = GVE_TX_BUFS_PER_PAGE_DQO *
252 		tx->dqo.qpl->num_entries;
253 	int i;
254 
255 	tx->dqo.tx_qpl_buf_next = kvcalloc(num_tx_qpl_bufs,
256 					   sizeof(tx->dqo.tx_qpl_buf_next[0]),
257 					   GFP_KERNEL);
258 	if (!tx->dqo.tx_qpl_buf_next)
259 		return -ENOMEM;
260 
261 	tx->dqo.num_tx_qpl_bufs = num_tx_qpl_bufs;
262 
263 	/* Generate free TX buf list */
264 	for (i = 0; i < num_tx_qpl_bufs - 1; i++)
265 		tx->dqo.tx_qpl_buf_next[i] = i + 1;
266 	tx->dqo.tx_qpl_buf_next[num_tx_qpl_bufs - 1] = -1;
267 
268 	atomic_set_release(&tx->dqo_compl.free_tx_qpl_buf_head, -1);
269 	return 0;
270 }
271 
272 void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
273 {
274 	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
275 	struct gve_tx_ring *tx = &priv->tx[idx];
276 
277 	gve_tx_add_to_block(priv, idx);
278 
279 	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
280 	gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
281 }
282 
283 static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
284 				 struct gve_tx_alloc_rings_cfg *cfg,
285 				 struct gve_tx_ring *tx,
286 				 int idx)
287 {
288 	struct device *hdev = &priv->pdev->dev;
289 	int num_pending_packets;
290 	int qpl_page_cnt;
291 	size_t bytes;
292 	u32 qpl_id;
293 	int i;
294 
295 	memset(tx, 0, sizeof(*tx));
296 	tx->q_num = idx;
297 	tx->dev = hdev;
298 	atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
299 
300 	/* Queue sizes must be a power of 2 */
301 	tx->mask = cfg->ring_size - 1;
302 	tx->dqo.complq_mask = tx->mask;
303 
304 	/* The max number of pending packets determines the maximum number of
305 	 * descriptors which may be written to the completion queue.
306 	 *
307 	 * We must set the number small enough to make sure we never overrun the
308 	 * completion queue.
309 	 */
310 	num_pending_packets = tx->dqo.complq_mask + 1;
311 
312 	/* Reserve space for descriptor completions, which will be reported at
313 	 * most every GVE_TX_MIN_RE_INTERVAL packets.
314 	 */
315 	num_pending_packets -=
316 		(tx->dqo.complq_mask + 1) / GVE_TX_MIN_RE_INTERVAL;
317 
318 	/* Each packet may have at most 2 buffer completions if it receives both
319 	 * a miss and reinjection completion.
320 	 */
321 	num_pending_packets /= 2;
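	/* Illustrative sizing (values hypothetical): assuming a 512-entry
	 * completion ring and a report-event interval of 32, this reserves
	 * 512 / 32 = 16 slots for descriptor completions and halves the
	 * remainder, (512 - 16) / 2 = 248 pending packets, because each packet
	 * may generate both a miss and a reinjection completion.
	 */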
322 
323 	tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX);
324 	tx->dqo.pending_packets = kvcalloc(tx->dqo.num_pending_packets,
325 					   sizeof(tx->dqo.pending_packets[0]),
326 					   GFP_KERNEL);
327 	if (!tx->dqo.pending_packets)
328 		goto err;
329 
330 	/* Set up linked list of pending packets */
331 	for (i = 0; i < tx->dqo.num_pending_packets - 1; i++)
332 		tx->dqo.pending_packets[i].next = i + 1;
333 
334 	tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
335 	atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
336 	tx->dqo_compl.miss_completions.head = -1;
337 	tx->dqo_compl.miss_completions.tail = -1;
338 	tx->dqo_compl.timed_out_completions.head = -1;
339 	tx->dqo_compl.timed_out_completions.tail = -1;
340 
341 	bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
342 	tx->dqo.tx_ring = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
343 	if (!tx->dqo.tx_ring)
344 		goto err;
345 
346 	bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1);
347 	tx->dqo.compl_ring = dma_alloc_coherent(hdev, bytes,
348 						&tx->complq_bus_dqo,
349 						GFP_KERNEL);
350 	if (!tx->dqo.compl_ring)
351 		goto err;
352 
353 	tx->q_resources = dma_alloc_coherent(hdev, sizeof(*tx->q_resources),
354 					     &tx->q_resources_bus, GFP_KERNEL);
355 	if (!tx->q_resources)
356 		goto err;
357 
358 	if (!cfg->raw_addressing) {
359 		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
360 		qpl_page_cnt = priv->tx_pages_per_qpl;
361 
362 		tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
363 							qpl_page_cnt);
364 		if (!tx->dqo.qpl)
365 			goto err;
366 
367 		if (gve_tx_qpl_buf_init(tx))
368 			goto err;
369 	}
370 
371 	return 0;
372 
373 err:
374 	gve_tx_free_ring_dqo(priv, tx, cfg);
375 	return -ENOMEM;
376 }
377 
378 int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
379 			   struct gve_tx_alloc_rings_cfg *cfg)
380 {
381 	struct gve_tx_ring *tx = cfg->tx;
382 	int err = 0;
383 	int i, j;
384 
385 	if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
386 		netif_err(priv, drv, priv->dev,
387 			  "Cannot alloc more than the max num of Tx rings\n");
388 		return -EINVAL;
389 	}
390 
391 	if (cfg->start_idx == 0) {
392 		tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
393 			      GFP_KERNEL);
394 		if (!tx)
395 			return -ENOMEM;
396 	} else if (!tx) {
397 		netif_err(priv, drv, priv->dev,
398 			  "Cannot alloc tx rings from a nonzero start idx without tx array\n");
399 		return -EINVAL;
400 	}
401 
402 	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
403 		err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
404 		if (err) {
405 			netif_err(priv, drv, priv->dev,
406 				  "Failed to alloc tx ring=%d: err=%d\n",
407 				  i, err);
408 			goto err;
409 		}
410 	}
411 
412 	cfg->tx = tx;
413 	return 0;
414 
415 err:
416 	for (j = 0; j < i; j++)
417 		gve_tx_free_ring_dqo(priv, &tx[j], cfg);
418 	if (cfg->start_idx == 0)
419 		kvfree(tx);
420 	return err;
421 }
422 
423 void gve_tx_free_rings_dqo(struct gve_priv *priv,
424 			   struct gve_tx_alloc_rings_cfg *cfg)
425 {
426 	struct gve_tx_ring *tx = cfg->tx;
427 	int i;
428 
429 	if (!tx)
430 		return;
431 
432 	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
433 		gve_tx_free_ring_dqo(priv, &tx[i], cfg);
434 
435 	if (cfg->start_idx == 0) {
436 		kvfree(tx);
437 		cfg->tx = NULL;
438 	}
439 }
440 
441 /* Returns the number of slots available in the ring */
442 static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
443 {
444 	u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask;
445 
446 	return tx->mask - num_used;
447 }
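
/* Worked example of the mask arithmetic above (values illustrative): with
 * tx->mask == 511, tail == 10 and head == 500, (10 - 500) & 511 == 22 slots
 * are in use, so 511 - 22 == 489 slots are reported free. The capacity is
 * treated as mask (ring size minus one), the usual ring-buffer convention for
 * distinguishing a full ring from an empty one.
 */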
448 
449 static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
450 				       int desc_count, int buf_count)
451 {
452 	return gve_has_pending_packet(tx) &&
453 		   num_avail_tx_slots(tx) >= desc_count &&
454 		   gve_has_free_tx_qpl_bufs(tx, buf_count);
455 }
456 
457 /* Stops the queue if fewer descriptors or buffers are available than requested.
458  * Return: 0 if stop is not required.
459  */
460 static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
461 				 int desc_count, int buf_count)
462 {
463 	if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
464 		return 0;
465 
466 	/* Update cached TX head pointer */
467 	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
468 
469 	if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
470 		return 0;
471 
472 	/* No space, so stop the queue */
473 	tx->stop_queue++;
474 	netif_tx_stop_queue(tx->netdev_txq);
475 
476 	/* Sync with restarting queue in `gve_tx_poll_dqo()` */
477 	mb();
478 
479 	/* After stopping queue, check if we can transmit again in order to
480 	 * avoid a TOCTOU race with the completion path.
481 	 */
482 	tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
483 
484 	if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
485 		return -EBUSY;
486 
487 	netif_tx_start_queue(tx->netdev_txq);
488 	tx->wake_queue++;
489 	return 0;
490 }
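
/* Note on the stop/wake ordering above: the mb() after netif_tx_stop_queue()
 * pairs with the mb() in gve_tx_poll_dqo(). Without the re-check that follows
 * it, completions processed after the first availability check but before the
 * queue was stopped could leave the queue stopped indefinitely, because the
 * cleaning path only wakes a queue it observes as already stopped.
 */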
491 
492 static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
493 					struct gve_tx_metadata_dqo *metadata)
494 {
495 	memset(metadata, 0, sizeof(*metadata));
496 	metadata->version = GVE_TX_METADATA_VERSION_DQO;
497 
498 	if (skb->l4_hash) {
499 		u16 path_hash = skb->hash ^ (skb->hash >> 16);
500 
501 		path_hash &= (1 << 15) - 1;
502 		if (unlikely(path_hash == 0))
503 			path_hash = ~path_hash;
504 
505 		metadata->path_hash = path_hash;
506 	}
507 }
508 
509 static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
510 				     struct sk_buff *skb, u32 len, u64 addr,
511 				     s16 compl_tag, bool eop, bool is_gso)
512 {
513 	const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;
514 
515 	while (len > 0) {
516 		struct gve_tx_pkt_desc_dqo *desc =
517 			&tx->dqo.tx_ring[*desc_idx].pkt;
518 		u32 cur_len = min_t(u32, len, GVE_TX_MAX_BUF_SIZE_DQO);
519 		bool cur_eop = eop && cur_len == len;
520 
521 		*desc = (struct gve_tx_pkt_desc_dqo){
522 			.buf_addr = cpu_to_le64(addr),
523 			.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
524 			.end_of_packet = cur_eop,
525 			.checksum_offload_enable = checksum_offload_en,
526 			.compl_tag = cpu_to_le16(compl_tag),
527 			.buf_size = cur_len,
528 		};
529 
530 		addr += cur_len;
531 		len -= cur_len;
532 		*desc_idx = (*desc_idx + 1) & tx->mask;
533 	}
534 }
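
/* The loop above splits one mapped buffer into as many descriptors as needed,
 * each at most GVE_TX_MAX_BUF_SIZE_DQO bytes, and sets end_of_packet only on
 * the final chunk of the final buffer. For example (sizes illustrative), a
 * 40000-byte buffer with a 16383-byte per-descriptor limit is emitted as
 * three descriptors of 16383, 16383 and 7234 bytes sharing one compl_tag.
 */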
535 
536 /* Validates and prepares `skb` for TSO.
537  *
538  * Returns header length, or < 0 if invalid.
539  */
540 static int gve_prep_tso(struct sk_buff *skb)
541 {
542 	struct tcphdr *tcp;
543 	int header_len;
544 	u32 paylen;
545 	int err;
546 
547 	/* Note: HW requires MSS (gso_size) to be <= 9728 and the total length
548 	 * of the TSO to be <= 262143.
549 	 *
550 	 * However, we don't validate these because:
551 	 * - Hypervisor enforces a limit of 9K MTU
552 	 * - Kernel will not produce a TSO larger than 64k
553 	 */
554 
555 	if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
556 		return -1;
557 
558 	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
559 		return -EINVAL;
560 
561 	/* Needed because we will modify header. */
562 	/* Needed because we will modify the header. */
563 	if (err < 0)
564 		return err;
565 
566 	tcp = tcp_hdr(skb);
567 	paylen = skb->len - skb_transport_offset(skb);
568 	csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
569 	header_len = skb_tcp_all_headers(skb);
570 
571 	if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
572 		return -EINVAL;
573 
574 	return header_len;
575 }
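
/* Background on the checksum handling above: for TSO the device rebuilds the
 * TCP checksum for every segment, so csum_replace_by_diff() adjusts tcp->check
 * by the payload length (paylen), the same "remove the payload length from the
 * pseudo-header checksum" preparation seen in other drivers' TSO paths, so
 * that correct per-segment checksums can be produced.
 */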
576 
577 static void gve_tx_fill_tso_ctx_desc(struct gve_tx_tso_context_desc_dqo *desc,
578 				     const struct sk_buff *skb,
579 				     const struct gve_tx_metadata_dqo *metadata,
580 				     int header_len)
581 {
582 	*desc = (struct gve_tx_tso_context_desc_dqo){
583 		.header_len = header_len,
584 		.cmd_dtype = {
585 			.dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO,
586 			.tso = 1,
587 		},
588 		.flex0 = metadata->bytes[0],
589 		.flex5 = metadata->bytes[5],
590 		.flex6 = metadata->bytes[6],
591 		.flex7 = metadata->bytes[7],
592 		.flex8 = metadata->bytes[8],
593 		.flex9 = metadata->bytes[9],
594 		.flex10 = metadata->bytes[10],
595 		.flex11 = metadata->bytes[11],
596 	};
597 	desc->tso_total_len = skb->len - header_len;
598 	desc->mss = skb_shinfo(skb)->gso_size;
599 }
600 
601 static void
602 gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
603 			     const struct gve_tx_metadata_dqo *metadata)
604 {
605 	*desc = (struct gve_tx_general_context_desc_dqo){
606 		.flex0 = metadata->bytes[0],
607 		.flex1 = metadata->bytes[1],
608 		.flex2 = metadata->bytes[2],
609 		.flex3 = metadata->bytes[3],
610 		.flex4 = metadata->bytes[4],
611 		.flex5 = metadata->bytes[5],
612 		.flex6 = metadata->bytes[6],
613 		.flex7 = metadata->bytes[7],
614 		.flex8 = metadata->bytes[8],
615 		.flex9 = metadata->bytes[9],
616 		.flex10 = metadata->bytes[10],
617 		.flex11 = metadata->bytes[11],
618 		.cmd_dtype = {.dtype = GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO},
619 	};
620 }
621 
622 static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
623 				      struct sk_buff *skb,
624 				      struct gve_tx_pending_packet_dqo *pkt,
625 				      s16 completion_tag,
626 				      u32 *desc_idx,
627 				      bool is_gso)
628 {
629 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
630 	int i;
631 
632 	/* Note: HW requires that the size of a non-TSO packet be within the
633 	 * range of [17, 9728].
634 	 *
635 	 * We don't double check because
636 	 * - We limited `netdev->min_mtu` to ETH_MIN_MTU.
637 	 * - Hypervisor won't allow MTU larger than 9216.
638 	 */
639 
640 	pkt->num_bufs = 0;
641 	/* Map the linear portion of skb */
642 	{
643 		u32 len = skb_headlen(skb);
644 		dma_addr_t addr;
645 
646 		addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
647 		if (unlikely(dma_mapping_error(tx->dev, addr)))
648 			goto err;
649 
650 		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
651 		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
652 		++pkt->num_bufs;
653 
654 		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
655 					 completion_tag,
656 					 /*eop=*/shinfo->nr_frags == 0, is_gso);
657 	}
658 
659 	for (i = 0; i < shinfo->nr_frags; i++) {
660 		const skb_frag_t *frag = &shinfo->frags[i];
661 		bool is_eop = i == (shinfo->nr_frags - 1);
662 		u32 len = skb_frag_size(frag);
663 		dma_addr_t addr;
664 
665 		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
666 		if (unlikely(dma_mapping_error(tx->dev, addr)))
667 			goto err;
668 
669 		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
670 		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
671 		++pkt->num_bufs;
672 
673 		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
674 					 completion_tag, is_eop, is_gso);
675 	}
676 
677 	return 0;
678 err:
679 	for (i = 0; i < pkt->num_bufs; i++) {
680 		if (i == 0) {
681 			dma_unmap_single(tx->dev,
682 					 dma_unmap_addr(pkt, dma[i]),
683 					 dma_unmap_len(pkt, len[i]),
684 					 DMA_TO_DEVICE);
685 		} else {
686 			dma_unmap_page(tx->dev,
687 				       dma_unmap_addr(pkt, dma[i]),
688 				       dma_unmap_len(pkt, len[i]),
689 				       DMA_TO_DEVICE);
690 		}
691 	}
692 	pkt->num_bufs = 0;
693 	return -1;
694 }
695 
696 /* Tx buffer i corresponds to
697  * qpl_page_id = i / GVE_TX_BUFS_PER_PAGE_DQO
698  * qpl_page_offset = (i % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO
699  */
700 static void gve_tx_buf_get_addr(struct gve_tx_ring *tx,
701 				s16 index,
702 				void **va, dma_addr_t *dma_addr)
703 {
704 	int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO);
705 	int offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) << GVE_TX_BUF_SHIFT_DQO;
706 
707 	*va = page_address(tx->dqo.qpl->pages[page_id]) + offset;
708 	*dma_addr = tx->dqo.qpl->page_buses[page_id] + offset;
709 }
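
/* Worked example of the index math above, assuming 4 KiB pages and 2 KiB TX
 * buffers (GVE_TX_BUF_SHIFT_DQO == 11, GVE_TX_BUFS_PER_PAGE_DQO == 2): buffer
 * index 5 maps to page_id = 5 >> 1 = 2 and offset = (5 & 1) << 11 = 2048,
 * i.e. the second half of the third QPL page. These sizes are illustrative;
 * the real values come from gve_dqo.h.
 */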
710 
711 static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
712 				   struct sk_buff *skb,
713 				   struct gve_tx_pending_packet_dqo *pkt,
714 				   s16 completion_tag,
715 				   u32 *desc_idx,
716 				   bool is_gso)
717 {
718 	u32 copy_offset = 0;
719 	dma_addr_t dma_addr;
720 	u32 copy_len;
721 	s16 index;
722 	void *va;
723 
724 	/* Break the packet into buffer size chunks */
725 	pkt->num_bufs = 0;
726 	while (copy_offset < skb->len) {
727 		index = gve_alloc_tx_qpl_buf(tx);
728 		if (unlikely(index == -1))
729 			goto err;
730 
731 		gve_tx_buf_get_addr(tx, index, &va, &dma_addr);
732 		copy_len = min_t(u32, GVE_TX_BUF_SIZE_DQO,
733 				 skb->len - copy_offset);
734 		skb_copy_bits(skb, copy_offset, va, copy_len);
735 
736 		copy_offset += copy_len;
737 		dma_sync_single_for_device(tx->dev, dma_addr,
738 					   copy_len, DMA_TO_DEVICE);
739 		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb,
740 					 copy_len,
741 					 dma_addr,
742 					 completion_tag,
743 					 copy_offset == skb->len,
744 					 is_gso);
745 
746 		pkt->tx_qpl_buf_ids[pkt->num_bufs] = index;
747 		++tx->dqo_tx.alloc_tx_qpl_buf_cnt;
748 		++pkt->num_bufs;
749 	}
750 
751 	return 0;
752 err:
753 	/* Should not be here if gve_has_free_tx_qpl_bufs() check is correct */
754 	gve_free_tx_qpl_bufs(tx, pkt);
755 	return -ENOMEM;
756 }
757 
758 /* Returns 0 on success, or < 0 on error.
759  *
760  * Before this function is called, the caller must ensure
761  * gve_has_pending_packet(tx) returns true.
762  */
763 static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
764 			      struct sk_buff *skb)
765 {
766 	const bool is_gso = skb_is_gso(skb);
767 	u32 desc_idx = tx->dqo_tx.tail;
768 	struct gve_tx_pending_packet_dqo *pkt;
769 	struct gve_tx_metadata_dqo metadata;
770 	s16 completion_tag;
771 
772 	pkt = gve_alloc_pending_packet(tx);
773 	pkt->skb = skb;
774 	completion_tag = pkt - tx->dqo.pending_packets;
775 
776 	gve_extract_tx_metadata_dqo(skb, &metadata);
777 	if (is_gso) {
778 		int header_len = gve_prep_tso(skb);
779 
780 		if (unlikely(header_len < 0))
781 			goto err;
782 
783 		gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx,
784 					 skb, &metadata, header_len);
785 		desc_idx = (desc_idx + 1) & tx->mask;
786 	}
787 
788 	gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
789 				     &metadata);
790 	desc_idx = (desc_idx + 1) & tx->mask;
791 
792 	if (tx->dqo.qpl) {
793 		if (gve_tx_add_skb_copy_dqo(tx, skb, pkt,
794 					    completion_tag,
795 					    &desc_idx, is_gso))
796 			goto err;
797 	} else {
798 		if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt,
799 					       completion_tag,
800 					       &desc_idx, is_gso))
801 			goto err;
802 	}
803 
804 	tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
805 
806 	/* Commit the changes to our state */
807 	tx->dqo_tx.tail = desc_idx;
808 
809 	/* Request a descriptor completion on the last descriptor of the
810 	 * packet if we are allowed to by the HW enforced interval.
811 	 */
812 	{
813 		u32 last_desc_idx = (desc_idx - 1) & tx->mask;
814 		u32 last_report_event_interval =
815 			(last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
816 
817 		if (unlikely(last_report_event_interval >=
818 			     GVE_TX_MIN_RE_INTERVAL)) {
819 			tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
820 			tx->dqo_tx.last_re_idx = last_desc_idx;
821 		}
822 	}
823 
824 	return 0;
825 
826 err:
827 	pkt->skb = NULL;
828 	gve_free_pending_packet(tx, pkt);
829 
830 	return -1;
831 }
832 
833 static int gve_num_descs_per_buf(size_t size)
834 {
835 	return DIV_ROUND_UP(size, GVE_TX_MAX_BUF_SIZE_DQO);
836 }
837 
838 static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
839 {
840 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
841 	int num_descs;
842 	int i;
843 
844 	num_descs = gve_num_descs_per_buf(skb_headlen(skb));
845 
846 	for (i = 0; i < shinfo->nr_frags; i++) {
847 		unsigned int frag_size = skb_frag_size(&shinfo->frags[i]);
848 
849 		num_descs += gve_num_descs_per_buf(frag_size);
850 	}
851 
852 	return num_descs;
853 }
854 
855 /* Returns true if HW is capable of sending TSO represented by `skb`.
856  *
857  * Each segment must not span more than GVE_TX_MAX_DATA_DESCS buffers.
858  * - The header is counted as one buffer for every single segment.
859  * - A buffer which is split between two segments is counted for both.
860  * - If a buffer contains both header and payload, it is counted as two buffers.
861  */
862 static bool gve_can_send_tso(const struct sk_buff *skb)
863 {
864 	const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
865 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
866 	const int header_len = skb_tcp_all_headers(skb);
867 	const int gso_size = shinfo->gso_size;
868 	int cur_seg_num_bufs;
869 	int prev_frag_size;
870 	int cur_seg_size;
871 	int i;
872 
873 	cur_seg_size = skb_headlen(skb) - header_len;
874 	prev_frag_size = skb_headlen(skb);
875 	cur_seg_num_bufs = cur_seg_size > 0;
876 
877 	for (i = 0; i < shinfo->nr_frags; i++) {
878 		if (cur_seg_size >= gso_size) {
879 			cur_seg_size %= gso_size;
880 			cur_seg_num_bufs = cur_seg_size > 0;
881 
882 			if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
883 				int prev_frag_remain = prev_frag_size %
884 					GVE_TX_MAX_BUF_SIZE_DQO;
885 
886 				/* If the last descriptor of the previous frag
887 				 * holds fewer bytes than cur_seg_size, the segment will
888 				 * span two descriptors in the previous frag.
889 				 * Since max gso size (9728) is less than
890 				 * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
891 				 * for the segment to span more than two
892 				 * descriptors.
893 				 */
894 				if (prev_frag_remain &&
895 				    cur_seg_size > prev_frag_remain)
896 					cur_seg_num_bufs++;
897 			}
898 		}
899 
900 		if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
901 			return false;
902 
903 		prev_frag_size = skb_frag_size(&shinfo->frags[i]);
904 		cur_seg_size += prev_frag_size;
905 	}
906 
907 	return true;
908 }
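
/* Illustrative walk-through of the check above (all sizes hypothetical): with
 * header_len == 66, skb_headlen() == 66 (headers only in the linear area),
 * gso_size == 1448 and 4096-byte frags, segments that fit entirely inside one
 * frag count as one buffer, while a segment that straddles a frag boundary
 * counts as two; cur_seg_num_bufs therefore never exceeds 2, well under
 * max_bufs_per_seg (GVE_TX_MAX_DATA_DESCS - 1), and the function returns true.
 */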
909 
910 netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
911 					 struct net_device *dev,
912 					 netdev_features_t features)
913 {
914 	if (skb_is_gso(skb) && !gve_can_send_tso(skb))
915 		return features & ~NETIF_F_GSO_MASK;
916 
917 	return features;
918 }
919 
920 /* Attempt to transmit specified SKB.
921  *
922  * Returns 0 if the SKB was transmitted or dropped.
923  * Returns -1 if there is not currently enough space to transmit the SKB.
924  */
925 static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
926 			  struct sk_buff *skb)
927 {
928 	int num_buffer_descs;
929 	int total_num_descs;
930 
931 	if (skb_is_gso(skb) && unlikely(ipv6_hopopt_jumbo_remove(skb)))
932 		goto drop;
933 
934 	if (tx->dqo.qpl) {
935 		/* We do not need to verify the number of buffers used per
936 		 * packet or per segment in case of TSO as with 2K size buffers
937 		 * none of the TX packet rules would be violated.
938 		 *
939 		 * gve_can_send_tso() checks that each TCP segment of gso_size is
940 		 * not distributed over more than 9 SKB frags.
941 		 */
942 		num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO);
943 	} else {
944 		num_buffer_descs = gve_num_buffer_descs_needed(skb);
945 		if (!skb_is_gso(skb)) {
946 			if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
947 				if (unlikely(skb_linearize(skb) < 0))
948 					goto drop;
949 
950 				num_buffer_descs = 1;
951 			}
952 		}
953 	}
954 
955 	/* Metadata + (optional TSO) + data descriptors. */
956 	total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
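	/* For example (counts illustrative), a GSO skb needing 3 data
	 * descriptors requires 1 general context + 1 TSO context + 3 data
	 * descriptors; gve_maybe_stop_tx_dqo() is then asked for that total
	 * plus GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP extra free slots.
	 */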
957 	if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
958 			GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP,
959 			num_buffer_descs))) {
960 		return -1;
961 	}
962 
963 	if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0))
964 		goto drop;
965 
966 	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
967 	skb_tx_timestamp(skb);
968 	return 0;
969 
970 drop:
971 	tx->dropped_pkt++;
972 	dev_kfree_skb_any(skb);
973 	return 0;
974 }
975 
976 /* Transmit a given skb and ring the doorbell. */
977 netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
978 {
979 	struct gve_priv *priv = netdev_priv(dev);
980 	struct gve_tx_ring *tx;
981 
982 	tx = &priv->tx[skb_get_queue_mapping(skb)];
983 	if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) {
984 		/* We need to ring the txq doorbell -- we have stopped the Tx
985 		 * queue for want of resources, but prior calls to this function
986 		 * may have added descriptors without ringing the doorbell.
987 		 */
988 		gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
989 		return NETDEV_TX_BUSY;
990 	}
991 
992 	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
993 		return NETDEV_TX_OK;
994 
995 	gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
996 	return NETDEV_TX_OK;
997 }
998 
999 static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
1000 			struct gve_tx_pending_packet_dqo *pending_packet)
1001 {
1002 	s16 old_tail, index;
1003 
1004 	index = pending_packet - tx->dqo.pending_packets;
1005 	old_tail = list->tail;
1006 	list->tail = index;
1007 	if (old_tail == -1)
1008 		list->head = index;
1009 	else
1010 		tx->dqo.pending_packets[old_tail].next = index;
1011 
1012 	pending_packet->next = -1;
1013 	pending_packet->prev = old_tail;
1014 }
1015 
1016 static void remove_from_list(struct gve_tx_ring *tx,
1017 			     struct gve_index_list *list,
1018 			     struct gve_tx_pending_packet_dqo *pkt)
1019 {
1020 	s16 prev_index, next_index;
1021 
1022 	prev_index = pkt->prev;
1023 	next_index = pkt->next;
1024 
1025 	if (prev_index == -1) {
1026 		/* Node is head */
1027 		list->head = next_index;
1028 	} else {
1029 		tx->dqo.pending_packets[prev_index].next = next_index;
1030 	}
1031 	if (next_index == -1) {
1032 		/* Node is tail */
1033 		list->tail = prev_index;
1034 	} else {
1035 		tx->dqo.pending_packets[next_index].prev = prev_index;
1036 	}
1037 }
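
/* add_to_list()/remove_from_list() above implement small doubly linked lists
 * threaded through the pending_packets array by index (s16), with -1 as the
 * null link. Packets are appended at the tail in the order their timeouts are
 * armed, which is why remove_miss_completions() and
 * remove_timed_out_completions() can stop scanning at the first entry that has
 * not yet expired.
 */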
1038 
1039 static void gve_unmap_packet(struct device *dev,
1040 			     struct gve_tx_pending_packet_dqo *pkt)
1041 {
1042 	int i;
1043 
1044 	/* SKB linear portion is guaranteed to be mapped */
1045 	dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
1046 			 dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
1047 	for (i = 1; i < pkt->num_bufs; i++) {
1048 		dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
1049 			       dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
1050 	}
1051 	pkt->num_bufs = 0;
1052 }
1053 
1054 /* Completion types and expected behavior:
1055  * No Miss compl + Packet compl = Packet completed normally.
1056  * Miss compl + Re-inject compl = Packet completed normally.
1057  * No Miss compl + Re-inject compl = Skipped i.e. packet not completed.
1058  * Miss compl + Packet compl = Skipped i.e. packet not completed.
1059  */
1060 static void gve_handle_packet_completion(struct gve_priv *priv,
1061 					 struct gve_tx_ring *tx, bool is_napi,
1062 					 u16 compl_tag, u64 *bytes, u64 *pkts,
1063 					 bool is_reinjection)
1064 {
1065 	struct gve_tx_pending_packet_dqo *pending_packet;
1066 
1067 	if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
1068 		net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
1069 				    priv->dev->name, (int)compl_tag);
1070 		return;
1071 	}
1072 
1073 	pending_packet = &tx->dqo.pending_packets[compl_tag];
1074 
1075 	if (unlikely(is_reinjection)) {
1076 		if (unlikely(pending_packet->state ==
1077 			     GVE_PACKET_STATE_TIMED_OUT_COMPL)) {
1078 			net_err_ratelimited("%s: Re-injection completion: %d received after timeout.\n",
1079 					    priv->dev->name, (int)compl_tag);
1080 			/* Packet was already completed as a result of timeout,
1081 			 * so just remove from list and free pending packet.
1082 			 */
1083 			remove_from_list(tx,
1084 					 &tx->dqo_compl.timed_out_completions,
1085 					 pending_packet);
1086 			gve_free_pending_packet(tx, pending_packet);
1087 			return;
1088 		}
1089 		if (unlikely(pending_packet->state !=
1090 			     GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
1091 			/* The packet is allocated but has no outstanding miss
1092 			 * completion, which means it received a re-injection
1093 			 * completion without a prior miss completion. Return
1094 			 * without completing the packet.
1095 			 */
1096 			net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
1097 					    priv->dev->name, (int)compl_tag);
1098 			return;
1099 		}
1100 		remove_from_list(tx, &tx->dqo_compl.miss_completions,
1101 				 pending_packet);
1102 	} else {
1103 		/* Packet is allocated but not a pending data completion. */
1104 		if (unlikely(pending_packet->state !=
1105 			     GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
1106 			net_err_ratelimited("%s: No pending data completion: %d\n",
1107 					    priv->dev->name, (int)compl_tag);
1108 			return;
1109 		}
1110 	}
1111 	tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs;
1112 	if (tx->dqo.qpl)
1113 		gve_free_tx_qpl_bufs(tx, pending_packet);
1114 	else
1115 		gve_unmap_packet(tx->dev, pending_packet);
1116 
1117 	*bytes += pending_packet->skb->len;
1118 	(*pkts)++;
1119 	napi_consume_skb(pending_packet->skb, is_napi);
1120 	pending_packet->skb = NULL;
1121 	gve_free_pending_packet(tx, pending_packet);
1122 }
1123 
1124 static void gve_handle_miss_completion(struct gve_priv *priv,
1125 				       struct gve_tx_ring *tx, u16 compl_tag,
1126 				       u64 *bytes, u64 *pkts)
1127 {
1128 	struct gve_tx_pending_packet_dqo *pending_packet;
1129 
1130 	if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
1131 		net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
1132 				    priv->dev->name, (int)compl_tag);
1133 		return;
1134 	}
1135 
1136 	pending_packet = &tx->dqo.pending_packets[compl_tag];
1137 	if (unlikely(pending_packet->state !=
1138 				GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
1139 		net_err_ratelimited("%s: Unexpected packet state: %d for completion tag : %d\n",
1140 				    priv->dev->name, (int)pending_packet->state,
1141 				    (int)compl_tag);
1142 		return;
1143 	}
1144 
1145 	pending_packet->state = GVE_PACKET_STATE_PENDING_REINJECT_COMPL;
1146 	/* jiffies can wrap around, but time comparisons handle overflow. */
1147 	pending_packet->timeout_jiffies =
1148 			jiffies +
1149 			msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
1150 					 MSEC_PER_SEC);
1151 	add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
1152 
1153 	*bytes += pending_packet->skb->len;
1154 	(*pkts)++;
1155 }
1156 
1157 static void remove_miss_completions(struct gve_priv *priv,
1158 				    struct gve_tx_ring *tx)
1159 {
1160 	struct gve_tx_pending_packet_dqo *pending_packet;
1161 	s16 next_index;
1162 
1163 	next_index = tx->dqo_compl.miss_completions.head;
1164 	while (next_index != -1) {
1165 		pending_packet = &tx->dqo.pending_packets[next_index];
1166 		next_index = pending_packet->next;
1167 		/* Break early because packets should time out in order. */
1168 		if (time_is_after_jiffies(pending_packet->timeout_jiffies))
1169 			break;
1170 
1171 		remove_from_list(tx, &tx->dqo_compl.miss_completions,
1172 				 pending_packet);
1173 		/* Unmap/free TX buffers and free the skb, but do not free the
1174 		 * pending packet: its completion tag stays allocated so the
1175 		 * driver can take appropriate action if a corresponding valid
1176 		 * completion is received later.
1177 		 */
1178 		if (tx->dqo.qpl)
1179 			gve_free_tx_qpl_bufs(tx, pending_packet);
1180 		else
1181 			gve_unmap_packet(tx->dev, pending_packet);
1182 
1183 		/* This indicates the packet was dropped. */
1184 		dev_kfree_skb_any(pending_packet->skb);
1185 		pending_packet->skb = NULL;
1186 		tx->dropped_pkt++;
1187 		net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
1188 				    priv->dev->name,
1189 				    (int)(pending_packet - tx->dqo.pending_packets));
1190 
1191 		pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
1192 		pending_packet->timeout_jiffies =
1193 				jiffies +
1194 				msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
1195 						 MSEC_PER_SEC);
1196 		/* Maintain pending packet in another list so the packet can be
1197 		 * unallocated at a later time.
1198 		 */
1199 		add_to_list(tx, &tx->dqo_compl.timed_out_completions,
1200 			    pending_packet);
1201 	}
1202 }
1203 
1204 static void remove_timed_out_completions(struct gve_priv *priv,
1205 					 struct gve_tx_ring *tx)
1206 {
1207 	struct gve_tx_pending_packet_dqo *pending_packet;
1208 	s16 next_index;
1209 
1210 	next_index = tx->dqo_compl.timed_out_completions.head;
1211 	while (next_index != -1) {
1212 		pending_packet = &tx->dqo.pending_packets[next_index];
1213 		next_index = pending_packet->next;
1214 		/* Break early because packets should timeout in order. */
1215 		/* Break early because packets should time out in order. */
1216 			break;
1217 
1218 		remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
1219 				 pending_packet);
1220 		gve_free_pending_packet(tx, pending_packet);
1221 	}
1222 }
1223 
1224 int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
1225 			  struct napi_struct *napi)
1226 {
1227 	u64 reinject_compl_bytes = 0;
1228 	u64 reinject_compl_pkts = 0;
1229 	int num_descs_cleaned = 0;
1230 	u64 miss_compl_bytes = 0;
1231 	u64 miss_compl_pkts = 0;
1232 	u64 pkt_compl_bytes = 0;
1233 	u64 pkt_compl_pkts = 0;
1234 
1235 	/* Limit in order to avoid blocking for too long */
1236 	while (!napi || pkt_compl_pkts < napi->weight) {
1237 		struct gve_tx_compl_desc *compl_desc =
1238 			&tx->dqo.compl_ring[tx->dqo_compl.head];
1239 		u16 type;
1240 
1241 		if (compl_desc->generation == tx->dqo_compl.cur_gen_bit)
1242 			break;
1243 
1244 		/* Prefetch the next descriptor. */
1245 		prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) &
1246 				tx->dqo.complq_mask]);
1247 
1248 		/* Do not read data until we own the descriptor */
1249 		dma_rmb();
1250 		type = compl_desc->type;
1251 
1252 		if (type == GVE_COMPL_TYPE_DQO_DESC) {
1253 			/* This is the last descriptor fetched by HW plus one */
1254 			u16 tx_head = le16_to_cpu(compl_desc->tx_head);
1255 
1256 			atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head);
1257 		} else if (type == GVE_COMPL_TYPE_DQO_PKT) {
1258 			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
1259 			if (compl_tag & GVE_ALT_MISS_COMPL_BIT) {
1260 				compl_tag &= ~GVE_ALT_MISS_COMPL_BIT;
1261 				gve_handle_miss_completion(priv, tx, compl_tag,
1262 							   &miss_compl_bytes,
1263 							   &miss_compl_pkts);
1264 			} else {
1265 				gve_handle_packet_completion(priv, tx, !!napi,
1266 							     compl_tag,
1267 							     &pkt_compl_bytes,
1268 							     &pkt_compl_pkts,
1269 							     false);
1270 			}
1271 		} else if (type == GVE_COMPL_TYPE_DQO_MISS) {
1272 			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
1273 
1274 			gve_handle_miss_completion(priv, tx, compl_tag,
1275 						   &miss_compl_bytes,
1276 						   &miss_compl_pkts);
1277 		} else if (type == GVE_COMPL_TYPE_DQO_REINJECTION) {
1278 			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
1279 
1280 			gve_handle_packet_completion(priv, tx, !!napi,
1281 						     compl_tag,
1282 						     &reinject_compl_bytes,
1283 						     &reinject_compl_pkts,
1284 						     true);
1285 		}
1286 
1287 		tx->dqo_compl.head =
1288 			(tx->dqo_compl.head + 1) & tx->dqo.complq_mask;
1289 		/* Flip the generation bit when we wrap around */
1290 		tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0;
1291 		num_descs_cleaned++;
1292 	}
1293 
1294 	netdev_tx_completed_queue(tx->netdev_txq,
1295 				  pkt_compl_pkts + miss_compl_pkts,
1296 				  pkt_compl_bytes + miss_compl_bytes);
1297 
1298 	remove_miss_completions(priv, tx);
1299 	remove_timed_out_completions(priv, tx);
1300 
1301 	u64_stats_update_begin(&tx->statss);
1302 	tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
1303 	tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
1304 	u64_stats_update_end(&tx->statss);
1305 	return num_descs_cleaned;
1306 }
1307 
1308 bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
1309 {
1310 	struct gve_tx_compl_desc *compl_desc;
1311 	struct gve_tx_ring *tx = block->tx;
1312 	struct gve_priv *priv = block->priv;
1313 
1314 	if (do_clean) {
1315 		int num_descs_cleaned = gve_clean_tx_done_dqo(priv, tx,
1316 							      &block->napi);
1317 
1318 		/* Sync with queue being stopped in `gve_maybe_stop_tx_dqo()` */
1319 		mb();
1320 
1321 		if (netif_tx_queue_stopped(tx->netdev_txq) &&
1322 		    num_descs_cleaned > 0) {
1323 			tx->wake_queue++;
1324 			netif_tx_wake_queue(tx->netdev_txq);
1325 		}
1326 	}
1327 
1328 	/* Return true if we still have work. */
1329 	compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
1330 	return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
1331 }
1332