xref: /linux/drivers/net/ethernet/google/gve/gve_tx.c (revision c9d23f9657cabfd2836a096bf6eddf8df2cf1434)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #include "gve.h"
8 #include "gve_adminq.h"
9 #include "gve_utils.h"
10 #include <linux/ip.h>
11 #include <linux/tcp.h>
12 #include <linux/vmalloc.h>
13 #include <linux/skbuff.h>
14 #include <net/xdp_sock_drv.h>
15 
16 static inline void gve_tx_put_doorbell(struct gve_priv *priv,
17 				       struct gve_queue_resources *q_resources,
18 				       u32 val)
19 {
20 	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
21 }
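
/* The doorbell value is simply tx->req, the free-running count of descriptors
 * made available to the NIC (see gve_xdp_tx_flush() and gve_tx() below); it is
 * written big-endian to the db_bar2 slot selected by q_resources->db_index,
 * matching the cpu_to_be*() byte order used for the descriptors themselves.
 */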
22 
23 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid)
24 {
25 	u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
26 	struct gve_tx_ring *tx = &priv->tx[tx_qid];
27 
28 	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
29 }
30 
31 /* gvnic can only transmit from a Registered Segment.
32  * We copy skb payloads into the registered segment before writing Tx
33  * descriptors and ringing the Tx doorbell.
34  *
35  * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
36  * free allocations in the order they were allocated.
37  */
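
/* Typical FIFO lifecycle in this file: gve_tx_fifo_init() vmaps the queue page
 * list when the ring is set up, gve_tx_alloc_fifo() carves out space per packet
 * in gve_tx_add_skb_copy()/gve_tx_fill_xdp(), gve_tx_free_fifo() returns that
 * space from the completion path (gve_clean_tx_done()/gve_clean_xdp_done()),
 * and gve_tx_fifo_release() unmaps the segment when the ring is freed.
 */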
38 
39 static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
40 {
41 	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
42 			  PAGE_KERNEL);
43 	if (unlikely(!fifo->base)) {
44 		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
45 			  fifo->qpl->id);
46 		return -ENOMEM;
47 	}
48 
49 	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
50 	atomic_set(&fifo->available, fifo->size);
51 	fifo->head = 0;
52 	return 0;
53 }
54 
55 static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
56 {
57 	WARN(atomic_read(&fifo->available) != fifo->size,
58 	     "Releasing non-empty fifo");
59 
60 	vunmap(fifo->base);
61 }
62 
63 static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
64 					  size_t bytes)
65 {
66 	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
67 }
68 
69 static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
70 {
71 	return (atomic_read(&fifo->available) <= bytes) ? false : true;
72 }
73 
74 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
75  * @fifo: FIFO to allocate from
76  * @bytes: Allocation size
77  * @iov: Scatter-gather elements to fill with allocation fragment base/len
78  *
79  * Returns number of valid elements in iov[] or negative on error.
80  *
81  * Allocations from a given FIFO must be externally synchronized, but an
82  * allocation may run concurrently with frees.
83  */
84 static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
85 			     struct gve_tx_iovec iov[2])
86 {
87 	size_t overflow, padding;
88 	u32 aligned_head;
89 	int nfrags = 0;
90 
91 	if (!bytes)
92 		return 0;
93 
94 	/* This check happens before we know how much padding is needed to
95 	 * align to a cacheline boundary for the payload, but that is fine,
96 	 * because the FIFO head always starts aligned, and the FIFO's boundaries
97 	 * are aligned, so if there is space for the data, there is space for
98 	 * the padding to the next alignment.
99 	 */
100 	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
101 	     "Reached %s when there's not enough space in the fifo", __func__);
102 
103 	nfrags++;
104 
105 	iov[0].iov_offset = fifo->head;
106 	iov[0].iov_len = bytes;
107 	fifo->head += bytes;
108 
109 	if (fifo->head > fifo->size) {
110 		/* If the allocation did not fit in the tail fragment of the
111 		 * FIFO, also use the head fragment.
112 		 */
113 		nfrags++;
114 		overflow = fifo->head - fifo->size;
115 		iov[0].iov_len -= overflow;
116 		iov[1].iov_offset = 0;	/* Start of fifo */
117 		iov[1].iov_len = overflow;
118 
119 		fifo->head = overflow;
120 	}
121 
122 	/* Re-align to a cacheline boundary */
123 	aligned_head = L1_CACHE_ALIGN(fifo->head);
124 	padding = aligned_head - fifo->head;
125 	iov[nfrags - 1].iov_padding = padding;
126 	atomic_sub(bytes + padding, &fifo->available);
127 	fifo->head = aligned_head;
128 
129 	if (fifo->head == fifo->size)
130 		fifo->head = 0;
131 
132 	return nfrags;
133 }
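
/* Worked example with illustrative numbers: for a 64 KiB FIFO with
 * fifo->head == 65000, gve_tx_alloc_fifo(fifo, 1000, iov) returns two frags,
 * iov[0] = {offset 65000, len 536} at the tail and iov[1] = {offset 0,
 * len 464} at the start of the FIFO. head becomes 464 and, assuming a 64-byte
 * cacheline, is then rounded up to 512, with the 48 padding bytes charged to
 * iov[1].iov_padding and subtracted from fifo->available along with the data.
 */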
134 
135 /* gve_tx_free_fifo - Return space to Tx FIFO
136  * @fifo: FIFO to return fragments to
137  * @bytes: Bytes to free
138  */
139 static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
140 {
141 	atomic_add(bytes, &fifo->available);
142 }
143 
144 static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info)
145 {
146 	size_t space_freed = 0;
147 	int i;
148 
149 	for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
150 		space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
151 		info->iov[i].iov_len = 0;
152 		info->iov[i].iov_padding = 0;
153 	}
154 	return space_freed;
155 }
156 
157 static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
158 			      u32 to_do)
159 {
160 	struct gve_tx_buffer_state *info;
161 	u32 clean_end = tx->done + to_do;
162 	u64 pkts = 0, bytes = 0;
163 	size_t space_freed = 0;
164 	u32 xsk_complete = 0;
165 	u32 idx;
166 
167 	for (; tx->done < clean_end; tx->done++) {
168 		idx = tx->done & tx->mask;
169 		info = &tx->info[idx];
170 
171 		if (unlikely(!info->xdp.size))
172 			continue;
173 
174 		bytes += info->xdp.size;
175 		pkts++;
176 		xsk_complete += info->xdp.is_xsk;
177 
178 		info->xdp.size = 0;
179 		if (info->xdp_frame) {
180 			xdp_return_frame(info->xdp_frame);
181 			info->xdp_frame = NULL;
182 		}
183 		space_freed += gve_tx_clear_buffer_state(info);
184 	}
185 
186 	gve_tx_free_fifo(&tx->tx_fifo, space_freed);
187 	if (xsk_complete > 0 && tx->xsk_pool)
188 		xsk_tx_completed(tx->xsk_pool, xsk_complete);
189 	u64_stats_update_begin(&tx->statss);
190 	tx->bytes_done += bytes;
191 	tx->pkt_done += pkts;
192 	u64_stats_update_end(&tx->statss);
193 	return pkts;
194 }
195 
196 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
197 			     u32 to_do, bool try_to_wake);
198 
199 static void gve_tx_free_ring(struct gve_priv *priv, int idx)
200 {
201 	struct gve_tx_ring *tx = &priv->tx[idx];
202 	struct device *hdev = &priv->pdev->dev;
203 	size_t bytes;
204 	u32 slots;
205 
206 	gve_tx_remove_from_block(priv, idx);
207 	slots = tx->mask + 1;
208 	if (tx->q_num < priv->tx_cfg.num_queues) {
209 		gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
210 		netdev_tx_reset_queue(tx->netdev_txq);
211 	} else {
212 		gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
213 	}
214 
215 	dma_free_coherent(hdev, sizeof(*tx->q_resources),
216 			  tx->q_resources, tx->q_resources_bus);
217 	tx->q_resources = NULL;
218 
219 	if (!tx->raw_addressing) {
220 		gve_tx_fifo_release(priv, &tx->tx_fifo);
221 		gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
222 		tx->tx_fifo.qpl = NULL;
223 	}
224 
225 	bytes = sizeof(*tx->desc) * slots;
226 	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
227 	tx->desc = NULL;
228 
229 	vfree(tx->info);
230 	tx->info = NULL;
231 
232 	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
233 }
234 
235 static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
236 {
237 	struct gve_tx_ring *tx = &priv->tx[idx];
238 	struct device *hdev = &priv->pdev->dev;
239 	u32 slots = priv->tx_desc_cnt;
240 	size_t bytes;
241 
242 	/* Make sure everything is zeroed to start */
243 	memset(tx, 0, sizeof(*tx));
244 	spin_lock_init(&tx->clean_lock);
245 	spin_lock_init(&tx->xdp_lock);
246 	tx->q_num = idx;
247 
248 	tx->mask = slots - 1;
249 
250 	/* alloc metadata */
251 	tx->info = vzalloc(sizeof(*tx->info) * slots);
252 	if (!tx->info)
253 		return -ENOMEM;
254 
255 	/* alloc tx queue */
256 	bytes = sizeof(*tx->desc) * slots;
257 	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
258 	if (!tx->desc)
259 		goto abort_with_info;
260 
261 	tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
262 	tx->dev = &priv->pdev->dev;
263 	if (!tx->raw_addressing) {
264 		tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx);
265 		if (!tx->tx_fifo.qpl)
266 			goto abort_with_desc;
267 		/* map Tx FIFO */
268 		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
269 			goto abort_with_qpl;
270 	}
271 
272 	tx->q_resources =
273 		dma_alloc_coherent(hdev,
274 				   sizeof(*tx->q_resources),
275 				   &tx->q_resources_bus,
276 				   GFP_KERNEL);
277 	if (!tx->q_resources)
278 		goto abort_with_fifo;
279 
280 	netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
281 		  (unsigned long)tx->bus);
282 	if (idx < priv->tx_cfg.num_queues)
283 		tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
284 	gve_tx_add_to_block(priv, idx);
285 
286 	return 0;
287 
288 abort_with_fifo:
289 	if (!tx->raw_addressing)
290 		gve_tx_fifo_release(priv, &tx->tx_fifo);
291 abort_with_qpl:
292 	if (!tx->raw_addressing)
293 		gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
294 abort_with_desc:
295 	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
296 	tx->desc = NULL;
297 abort_with_info:
298 	vfree(tx->info);
299 	tx->info = NULL;
300 	return -ENOMEM;
301 }
302 
303 int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings)
304 {
305 	int err = 0;
306 	int i;
307 
308 	for (i = start_id; i < start_id + num_rings; i++) {
309 		err = gve_tx_alloc_ring(priv, i);
310 		if (err) {
311 			netif_err(priv, drv, priv->dev,
312 				  "Failed to alloc tx ring=%d: err=%d\n",
313 				  i, err);
314 			break;
315 		}
316 	}
317 	/* Free the rings allocated so far if there was an error */
318 	if (err) {
319 		int j;
320 
321 		for (j = start_id; j < i; j++)
322 			gve_tx_free_ring(priv, j);
323 	}
324 	return err;
325 }
326 
327 void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings)
328 {
329 	int i;
330 
331 	for (i = start_id; i < start_id + num_rings; i++)
332 		gve_tx_free_ring(priv, i);
333 }
334 
335 /* gve_tx_avail - Calculates the number of slots available in the ring
336  * @tx: tx ring to check
337  *
338  * Returns the number of slots available
339  *
340  * The capacity of the queue is mask + 1. We don't need to reserve an entry.
341  */
342 static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
343 {
344 	return tx->mask + 1 - (tx->req - tx->done);
345 }
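
/* req and done are free-running u32 counters, so (tx->req - tx->done) is the
 * number of descriptors currently owned by the NIC even across wraparound.
 * E.g. with mask == 1023 (1024 slots), req == 1030 and done == 1000 there are
 * 30 outstanding descriptors and gve_tx_avail() returns 994.
 */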
346 
347 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
348 					      struct sk_buff *skb)
349 {
350 	int pad_bytes, align_hdr_pad;
351 	int bytes;
352 	int hlen;
353 
354 	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
355 				 tcp_hdrlen(skb) : skb_headlen(skb);
356 
357 	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
358 						   hlen);
359 	/* We need to take into account the header alignment padding. */
360 	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
361 	bytes = align_hdr_pad + pad_bytes + skb->len;
362 
363 	return bytes;
364 }
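
/* Illustrative numbers: for a non-GSO skb with skb->len == skb_headlen() ==
 * 1514, if the FIFO tail still has room for the 1514-byte header region then
 * pad_bytes == 0 and, assuming a 64-byte cacheline, align_hdr_pad ==
 * L1_CACHE_ALIGN(1514) - 1514 == 22, so 1536 bytes of FIFO space are required.
 * gve_maybe_stop_tx() feeds this result to gve_can_tx() before accepting the
 * skb.
 */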
365 
366 /* The maximum number of descriptors we could need is MAX_SKB_FRAGS + 4:
367  * 1 for each skb frag
368  * 1 for the skb linear portion
369  * 1 for when tcp hdr needs to be in separate descriptor
370  * 1 if the payload wraps to the beginning of the FIFO
371  * 1 for metadata descriptor
372  */
373 #define MAX_TX_DESC_NEEDED	(MAX_SKB_FRAGS + 4)
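
/* With the common MAX_SKB_FRAGS value of 17, MAX_TX_DESC_NEEDED works out to
 * 21 descriptors; gve_can_tx() below requires at least this many free ring
 * slots before another skb is accepted.
 */
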
374 static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
375 {
376 	if (info->skb) {
377 		dma_unmap_single(dev, dma_unmap_addr(info, dma),
378 				 dma_unmap_len(info, len),
379 				 DMA_TO_DEVICE);
380 		dma_unmap_len_set(info, len, 0);
381 	} else {
382 		dma_unmap_page(dev, dma_unmap_addr(info, dma),
383 			       dma_unmap_len(info, len),
384 			       DMA_TO_DEVICE);
385 		dma_unmap_len_set(info, len, 0);
386 	}
387 }
388 
389 /* Check if sufficient resources (descriptor ring space, FIFO space) are
390  * available to transmit the given number of bytes.
391  */
392 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
393 {
394 	bool can_alloc = true;
395 
396 	if (!tx->raw_addressing)
397 		can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);
398 
399 	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
400 }
401 
402 static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);
403 
404 /* Stops the queue if the skb cannot be transmitted. */
405 static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
406 			     struct sk_buff *skb)
407 {
408 	int bytes_required = 0;
409 	u32 nic_done;
410 	u32 to_do;
411 	int ret;
412 
413 	if (!tx->raw_addressing)
414 		bytes_required = gve_skb_fifo_bytes_required(tx, skb);
415 
416 	if (likely(gve_can_tx(tx, bytes_required)))
417 		return 0;
418 
419 	ret = -EBUSY;
420 	spin_lock(&tx->clean_lock);
421 	nic_done = gve_tx_load_event_counter(priv, tx);
422 	to_do = nic_done - tx->done;
423 
424 	/* Only try to clean if there is hope for TX */
425 	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
426 		if (to_do > 0) {
427 			to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
428 			gve_clean_tx_done(priv, tx, to_do, false);
429 		}
430 		if (likely(gve_can_tx(tx, bytes_required)))
431 			ret = 0;
432 	}
433 	if (ret) {
434 		/* No space, so stop the queue */
435 		tx->stop_queue++;
436 		netif_tx_stop_queue(tx->netdev_txq);
437 	}
438 	spin_unlock(&tx->clean_lock);
439 
440 	return ret;
441 }
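
/* gve_clean_tx_done() is otherwise driven from gve_tx_poll() under the same
 * clean_lock, so this opportunistic clean from the xmit path cannot race with
 * NAPI cleaning. If even that does not free enough room, the queue is stopped
 * here and re-woken by gve_clean_tx_done() once descriptors complete.
 */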
442 
443 static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
444 				 u16 csum_offset, u8 ip_summed, bool is_gso,
445 				 int l4_hdr_offset, u32 desc_cnt,
446 				 u16 hlen, u64 addr, u16 pkt_len)
447 {
448 	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
449 	if (is_gso) {
450 		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
451 		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
452 		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
453 	} else if (likely(ip_summed == CHECKSUM_PARTIAL)) {
454 		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
455 		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
456 		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
457 	} else {
458 		pkt_desc->pkt.type_flags = GVE_TXD_STD;
459 		pkt_desc->pkt.l4_csum_offset = 0;
460 		pkt_desc->pkt.l4_hdr_offset = 0;
461 	}
462 	pkt_desc->pkt.desc_cnt = desc_cnt;
463 	pkt_desc->pkt.len = cpu_to_be16(pkt_len);
464 	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
465 	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
466 }
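
/* Example of the 16-bit word encoding above: a checksum field 16 bytes past
 * csum_start is written as l4_csum_offset == 8, and an L4 header starting 34
 * bytes into the packet (14-byte Ethernet + 20-byte IPv4, for instance) as
 * l4_hdr_offset == 17.
 */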
467 
468 static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
469 				 struct sk_buff *skb)
470 {
471 	BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));
472 
473 	mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
474 	mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
475 				   GVE_MTD_PATH_HASH_L4;
476 	mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
477 	mtd_desc->mtd.reserved0 = 0;
478 	mtd_desc->mtd.reserved1 = 0;
479 }
480 
481 static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
482 				 u16 l3_offset, u16 gso_size,
483 				 bool is_gso_v6, bool is_gso,
484 				 u16 len, u64 addr)
485 {
486 	seg_desc->seg.type_flags = GVE_TXD_SEG;
487 	if (is_gso) {
488 		if (is_gso_v6)
489 			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
490 		seg_desc->seg.l3_offset = l3_offset >> 1;
491 		seg_desc->seg.mss = cpu_to_be16(gso_size);
492 	}
493 	seg_desc->seg.seg_len = cpu_to_be16(len);
494 	seg_desc->seg.seg_addr = cpu_to_be64(addr);
495 }
496 
497 static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
498 				    u64 iov_offset, u64 iov_len)
499 {
500 	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
501 	u64 first_page = iov_offset / PAGE_SIZE;
502 	u64 page;
503 
504 	for (page = first_page; page <= last_page; page++)
505 		dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
506 }
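
/* An allocation that straddles a page boundary is synced page by page: e.g.,
 * assuming 4 KiB pages, iov_offset == 4000 with iov_len == 300 touches bytes
 * 4000..4299, so pages 0 and 1 of the QPL are both synced for the device.
 */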
507 
508 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
509 {
510 	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
511 	union gve_tx_desc *pkt_desc, *seg_desc;
512 	struct gve_tx_buffer_state *info;
513 	int mtd_desc_nr = !!skb->l4_hash;
514 	bool is_gso = skb_is_gso(skb);
515 	u32 idx = tx->req & tx->mask;
516 	int payload_iov = 2;
517 	int copy_offset;
518 	u32 next_idx;
519 	int i;
520 
521 	info = &tx->info[idx];
522 	pkt_desc = &tx->desc[idx];
523 
524 	l4_hdr_offset = skb_checksum_start_offset(skb);
525 	/* If the skb is gso, then we want the tcp header in the first segment;
526 	 * otherwise we want the linear portion of the skb (which will contain
527 	 * the checksum because skb->csum_start and skb->csum_offset are given
528 	 * relative to skb->head) in the first segment.
529 	 */
530 	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
531 			skb_headlen(skb);
532 
533 	info->skb = skb;
534 	/* We don't want to split the header, so if necessary, pad to the end
535 	 * of the fifo and then put the header at the beginning of the fifo.
536 	 */
537 	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
538 	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
539 				       &info->iov[0]);
540 	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
541 	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
542 					   &info->iov[payload_iov]);
543 
544 	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
545 			     is_gso, l4_hdr_offset,
546 			     1 + mtd_desc_nr + payload_nfrags, hlen,
547 			     info->iov[hdr_nfrags - 1].iov_offset, skb->len);
548 
549 	skb_copy_bits(skb, 0,
550 		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
551 		      hlen);
552 	gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
553 				info->iov[hdr_nfrags - 1].iov_offset,
554 				info->iov[hdr_nfrags - 1].iov_len);
555 	copy_offset = hlen;
556 
557 	if (mtd_desc_nr) {
558 		next_idx = (tx->req + 1) & tx->mask;
559 		gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
560 	}
561 
562 	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
563 		next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
564 		seg_desc = &tx->desc[next_idx];
565 
566 		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
567 				     skb_shinfo(skb)->gso_size,
568 				     skb_is_gso_v6(skb), is_gso,
569 				     info->iov[i].iov_len,
570 				     info->iov[i].iov_offset);
571 
572 		skb_copy_bits(skb, copy_offset,
573 			      tx->tx_fifo.base + info->iov[i].iov_offset,
574 			      info->iov[i].iov_len);
575 		gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
576 					info->iov[i].iov_offset,
577 					info->iov[i].iov_len);
578 		copy_offset += info->iov[i].iov_len;
579 	}
580 
581 	return 1 + mtd_desc_nr + payload_nfrags;
582 }
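
/* Descriptor layout produced above for the copy (QPL) path, in ring order:
 * one packet descriptor covering the header bytes, an optional metadata
 * descriptor when skb->l4_hash is set, then one segment descriptor per payload
 * FIFO fragment (at most two, since the payload can wrap the FIFO once). The
 * return value is that total, which gve_tx() adds to tx->req.
 */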
583 
584 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
585 				  struct sk_buff *skb)
586 {
587 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
588 	int hlen, num_descriptors, l4_hdr_offset;
589 	union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
590 	struct gve_tx_buffer_state *info;
591 	int mtd_desc_nr = !!skb->l4_hash;
592 	bool is_gso = skb_is_gso(skb);
593 	u32 idx = tx->req & tx->mask;
594 	u64 addr;
595 	u32 len;
596 	int i;
597 
598 	info = &tx->info[idx];
599 	pkt_desc = &tx->desc[idx];
600 
601 	l4_hdr_offset = skb_checksum_start_offset(skb);
602 	/* If the skb is gso, then we want only up to the tcp header in the first segment
603 	 * to efficiently replicate on each segment; otherwise we want the linear portion
604 	 * of the skb (which will contain the checksum because skb->csum_start and
605 	 * skb->csum_offset are given relative to skb->head) in the first segment.
606 	 */
607 	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
608 	len = skb_headlen(skb);
609 
610 	info->skb = skb;
611 
612 	addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
613 	if (unlikely(dma_mapping_error(tx->dev, addr))) {
614 		tx->dma_mapping_error++;
615 		goto drop;
616 	}
617 	dma_unmap_len_set(info, len, len);
618 	dma_unmap_addr_set(info, dma, addr);
619 
620 	num_descriptors = 1 + shinfo->nr_frags;
621 	if (hlen < len)
622 		num_descriptors++;
623 	if (mtd_desc_nr)
624 		num_descriptors++;
625 
626 	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
627 			     is_gso, l4_hdr_offset,
628 			     num_descriptors, hlen, addr, skb->len);
629 
630 	if (mtd_desc_nr) {
631 		idx = (idx + 1) & tx->mask;
632 		mtd_desc = &tx->desc[idx];
633 		gve_tx_fill_mtd_desc(mtd_desc, skb);
634 	}
635 
636 	if (hlen < len) {
637 		/* For gso the rest of the linear portion of the skb needs to
638 		 * be in its own descriptor.
639 		 */
640 		len -= hlen;
641 		addr += hlen;
642 		idx = (idx + 1) & tx->mask;
643 		seg_desc = &tx->desc[idx];
644 		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
645 				     skb_shinfo(skb)->gso_size,
646 				     skb_is_gso_v6(skb), is_gso, len, addr);
647 	}
648 
649 	for (i = 0; i < shinfo->nr_frags; i++) {
650 		const skb_frag_t *frag = &shinfo->frags[i];
651 
652 		idx = (idx + 1) & tx->mask;
653 		seg_desc = &tx->desc[idx];
654 		len = skb_frag_size(frag);
655 		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
656 		if (unlikely(dma_mapping_error(tx->dev, addr))) {
657 			tx->dma_mapping_error++;
658 			goto unmap_drop;
659 		}
660 		tx->info[idx].skb = NULL;
661 		dma_unmap_len_set(&tx->info[idx], len, len);
662 		dma_unmap_addr_set(&tx->info[idx], dma, addr);
663 
664 		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
665 				     skb_shinfo(skb)->gso_size,
666 				     skb_is_gso_v6(skb), is_gso, len, addr);
667 	}
668 
669 	return num_descriptors;
670 
671 unmap_drop:
672 	i += num_descriptors - shinfo->nr_frags;
673 	while (i--) {
674 		/* Skip metadata descriptor, if set */
675 		if (i == 1 && mtd_desc_nr == 1)
676 			continue;
677 		idx--;
678 		gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
679 	}
680 drop:
681 	tx->dropped_pkt++;
682 	return 0;
683 }
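
/* In the raw-addressing (RDA) path the skb is DMA-mapped in place rather than
 * copied: one mapping for the linear part (split into a second descriptor when
 * it extends past the TSO header) plus one per frag. On a mapping failure
 * everything mapped so far is unwound via gve_tx_unmap_buf() and 0 is
 * returned, so gve_tx() drops the skb without advancing tx->req.
 */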
684 
685 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
686 {
687 	struct gve_priv *priv = netdev_priv(dev);
688 	struct gve_tx_ring *tx;
689 	int nsegs;
690 
691 	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
692 	     "skb queue index out of range");
693 	tx = &priv->tx[skb_get_queue_mapping(skb)];
694 	if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
695 		/* We need to ring the txq doorbell -- we have stopped the Tx
696 		 * queue for want of resources, but prior calls to gve_tx()
697 		 * may have added descriptors without ringing the doorbell.
698 		 */
699 
700 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
701 		return NETDEV_TX_BUSY;
702 	}
703 	if (tx->raw_addressing)
704 		nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
705 	else
706 		nsegs = gve_tx_add_skb_copy(priv, tx, skb);
707 
708 	/* If the packet is getting sent, account for it and timestamp the skb */
709 	if (nsegs) {
710 		netdev_tx_sent_queue(tx->netdev_txq, skb->len);
711 		skb_tx_timestamp(skb);
712 		tx->req += nsegs;
713 	} else {
714 		dev_kfree_skb_any(skb);
715 	}
716 
717 	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
718 		return NETDEV_TX_OK;
719 
720 	/* Give packets to NIC. Even if this packet failed to send, the doorbell
721 	 * might need to be rung because of xmit_more.
722 	 */
723 	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
724 	return NETDEV_TX_OK;
725 }
726 
727 static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
728 			   void *data, int len, void *frame_p, bool is_xsk)
729 {
730 	int pad, nfrags, ndescs, iovi, offset;
731 	struct gve_tx_buffer_state *info;
732 	u32 reqi = tx->req;
733 
734 	pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
735 	if (pad >= GVE_TX_MAX_HEADER_SIZE)
736 		pad = 0;
737 	info = &tx->info[reqi & tx->mask];
738 	info->xdp_frame = frame_p;
739 	info->xdp.size = len;
740 	info->xdp.is_xsk = is_xsk;
741 
742 	nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
743 				   &info->iov[0]);
744 	iovi = pad > 0;
745 	ndescs = nfrags - iovi;
746 	offset = 0;
747 
748 	while (iovi < nfrags) {
749 		if (!offset)
750 			gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0,
751 					     CHECKSUM_NONE, false, 0, ndescs,
752 					     info->iov[iovi].iov_len,
753 					     info->iov[iovi].iov_offset, len);
754 		else
755 			gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask],
756 					     0, 0, false, false,
757 					     info->iov[iovi].iov_len,
758 					     info->iov[iovi].iov_offset);
759 
760 		memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset,
761 		       data + offset, info->iov[iovi].iov_len);
762 		gve_dma_sync_for_device(&priv->pdev->dev,
763 					tx->tx_fifo.qpl->page_buses,
764 					info->iov[iovi].iov_offset,
765 					info->iov[iovi].iov_len);
766 		offset += info->iov[iovi].iov_len;
767 		iovi++;
768 		reqi++;
769 	}
770 
771 	return ndescs;
772 }
773 
774 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
775 		 u32 flags)
776 {
777 	struct gve_priv *priv = netdev_priv(dev);
778 	struct gve_tx_ring *tx;
779 	int i, err = 0, qid;
780 
781 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
782 		return -EINVAL;
783 
784 	qid = gve_xdp_tx_queue_id(priv,
785 				  smp_processor_id() % priv->num_xdp_queues);
786 
787 	tx = &priv->tx[qid];
788 
789 	spin_lock(&tx->xdp_lock);
790 	for (i = 0; i < n; i++) {
791 		err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
792 				       frames[i]->len, frames[i]);
793 		if (err)
794 			break;
795 	}
796 
797 	if (flags & XDP_XMIT_FLUSH)
798 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
799 
800 	spin_unlock(&tx->xdp_lock);
801 
802 	u64_stats_update_begin(&tx->statss);
803 	tx->xdp_xmit += n;
804 	tx->xdp_xmit_errors += n - i;
805 	u64_stats_update_end(&tx->statss);
806 
807 	return i ? i : err;
808 }
809 
810 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
811 		     void *data, int len, void *frame_p)
812 {
813 	int nsegs;
814 
815 	if (!gve_can_tx(tx, len + GVE_TX_MAX_HEADER_SIZE - 1))
816 		return -EBUSY;
817 
818 	nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
819 	tx->req += nsegs;
820 
821 	return 0;
822 }
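
/* The FIFO headroom check uses len + GVE_TX_MAX_HEADER_SIZE - 1 because
 * gve_tx_fill_xdp() may burn up to GVE_TX_MAX_HEADER_SIZE - 1 bytes of padding
 * at the end of the FIFO before the frame (a larger pad is dropped and the
 * frame wraps instead), which is the worst-case space a single frame can
 * consume before cacheline alignment.
 */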
823 
824 #define GVE_TX_START_THRESH	PAGE_SIZE
825 
826 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
827 			     u32 to_do, bool try_to_wake)
828 {
829 	struct gve_tx_buffer_state *info;
830 	u64 pkts = 0, bytes = 0;
831 	size_t space_freed = 0;
832 	struct sk_buff *skb;
833 	u32 idx;
834 	int j;
835 
836 	for (j = 0; j < to_do; j++) {
837 		idx = tx->done & tx->mask;
838 		netif_info(priv, tx_done, priv->dev,
839 			   "[%d] %s: idx=%d (req=%u done=%u)\n",
840 			   tx->q_num, __func__, idx, tx->req, tx->done);
841 		info = &tx->info[idx];
842 		skb = info->skb;
843 
844 		/* Unmap the buffer */
845 		if (tx->raw_addressing)
846 			gve_tx_unmap_buf(tx->dev, info);
847 		tx->done++;
848 		/* Mark as free */
849 		if (skb) {
850 			info->skb = NULL;
851 			bytes += skb->len;
852 			pkts++;
853 			dev_consume_skb_any(skb);
854 			if (tx->raw_addressing)
855 				continue;
856 			space_freed += gve_tx_clear_buffer_state(info);
857 		}
858 	}
859 
860 	if (!tx->raw_addressing)
861 		gve_tx_free_fifo(&tx->tx_fifo, space_freed);
862 	u64_stats_update_begin(&tx->statss);
863 	tx->bytes_done += bytes;
864 	tx->pkt_done += pkts;
865 	u64_stats_update_end(&tx->statss);
866 	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);
867 
868 	/* start the queue if we've stopped it */
869 #ifndef CONFIG_BQL
870 	/* Make sure that the doorbells are synced */
871 	smp_mb();
872 #endif
873 	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
874 	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
875 		tx->wake_queue++;
876 		netif_tx_wake_queue(tx->netdev_txq);
877 	}
878 
879 	return pkts;
880 }
881 
882 u32 gve_tx_load_event_counter(struct gve_priv *priv,
883 			      struct gve_tx_ring *tx)
884 {
885 	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
886 	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);
887 
888 	return be32_to_cpu(counter);
889 }
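
/* The NIC reports TX progress through the event counter in
 * priv->counter_array selected by this queue's counter_index; callers compute
 * (nic_done - tx->done) to learn how many descriptors can be cleaned, again
 * relying on unsigned wraparound of the free-running counters.
 */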
890 
891 static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
892 		      int budget)
893 {
894 	struct xdp_desc desc;
895 	int sent = 0, nsegs;
896 	void *data;
897 
898 	spin_lock(&tx->xdp_lock);
899 	while (sent < budget) {
900 		if (!gve_can_tx(tx, GVE_TX_START_THRESH))
901 			goto out;
902 
903 		if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
904 			tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
905 			goto out;
906 		}
907 
908 		data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
909 		nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
910 		tx->req += nsegs;
911 		sent++;
912 	}
913 out:
914 	if (sent > 0) {
915 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
916 		xsk_tx_release(tx->xsk_pool);
917 	}
918 	spin_unlock(&tx->xdp_lock);
919 	return sent;
920 }
921 
922 bool gve_xdp_poll(struct gve_notify_block *block, int budget)
923 {
924 	struct gve_priv *priv = block->priv;
925 	struct gve_tx_ring *tx = block->tx;
926 	u32 nic_done;
927 	bool repoll;
928 	u32 to_do;
929 
930 	/* If budget is 0, do all the work */
931 	if (budget == 0)
932 		budget = INT_MAX;
933 
934 	/* Find out how much work there is to be done */
935 	nic_done = gve_tx_load_event_counter(priv, tx);
936 	to_do = min_t(u32, (nic_done - tx->done), budget);
937 	gve_clean_xdp_done(priv, tx, to_do);
938 	repoll = nic_done != tx->done;
939 
940 	if (tx->xsk_pool) {
941 		int sent = gve_xsk_tx(priv, tx, budget);
942 
943 		u64_stats_update_begin(&tx->statss);
944 		tx->xdp_xsk_sent += sent;
945 		u64_stats_update_end(&tx->statss);
946 		repoll |= (sent == budget);
947 		if (xsk_uses_need_wakeup(tx->xsk_pool))
948 			xsk_set_tx_need_wakeup(tx->xsk_pool);
949 	}
950 
951 	/* If we still have work we want to repoll */
952 	return repoll;
953 }
954 
955 bool gve_tx_poll(struct gve_notify_block *block, int budget)
956 {
957 	struct gve_priv *priv = block->priv;
958 	struct gve_tx_ring *tx = block->tx;
959 	u32 nic_done;
960 	u32 to_do;
961 
962 	/* If budget is 0, do all the work */
963 	if (budget == 0)
964 		budget = INT_MAX;
965 
966 	/* The TX path may try to clean completed pkts in order to xmit; to
967 	 * avoid a cleaning conflict, use spin_lock(). It yields better
968 	 * concurrency between xmit/clean than netif's lock.
969 	 */
970 	spin_lock(&tx->clean_lock);
971 	/* Find out how much work there is to be done */
972 	nic_done = gve_tx_load_event_counter(priv, tx);
973 	to_do = min_t(u32, (nic_done - tx->done), budget);
974 	gve_clean_tx_done(priv, tx, to_do, true);
975 	spin_unlock(&tx->clean_lock);
976 	/* If we still have work we want to repoll */
977 	return nic_done != tx->done;
978 }
979 
980 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
981 {
982 	u32 nic_done = gve_tx_load_event_counter(priv, tx);
983 
984 	return nic_done != tx->done;
985 }
986