xref: /linux/drivers/net/ethernet/google/gve/gve_tx.c (revision 9fc31a9251de4acaab2d0704450d70ddc99f5ea2)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #include "gve.h"
8 #include "gve_adminq.h"
9 #include "gve_utils.h"
10 #include <linux/ip.h>
11 #include <linux/tcp.h>
12 #include <linux/vmalloc.h>
13 #include <linux/skbuff.h>
14 #include <net/xdp_sock_drv.h>
15 
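/* Ring a Tx doorbell. The doorbell BAR is an array of big-endian registers;
 * db_index (stored big-endian in the queue resources) selects this queue's
 * slot, and the value written is the queue's producer index (tx->req at the
 * call sites).
 */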
16 static inline void gve_tx_put_doorbell(struct gve_priv *priv,
17 				       struct gve_queue_resources *q_resources,
18 				       u32 val)
19 {
20 	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
21 }
22 
23 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid)
24 {
25 	u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
26 	struct gve_tx_ring *tx = &priv->tx[tx_qid];
27 
28 	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
29 }
30 
31 /* gvnic can only transmit from a Registered Segment.
32  * We copy skb payloads into the registered segment before writing Tx
33  * descriptors and ringing the Tx doorbell.
34  *
35  * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
36  * free allocations in the order they were allocated.
37  */
38 
39 static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
40 {
41 	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
42 			  PAGE_KERNEL);
43 	if (unlikely(!fifo->base)) {
44 		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
45 			  fifo->qpl->id);
46 		return -ENOMEM;
47 	}
48 
49 	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
50 	atomic_set(&fifo->available, fifo->size);
51 	fifo->head = 0;
52 	return 0;
53 }
54 
55 static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
56 {
57 	WARN(atomic_read(&fifo->available) != fifo->size,
58 	     "Releasing non-empty fifo");
59 
60 	vunmap(fifo->base);
61 }
62 
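/* Returns 0 if an allocation of @bytes starting at the current head fits
 * before the end of the FIFO, otherwise the number of bytes from the head to
 * the end of the FIFO, i.e. the padding needed so the allocation can start
 * at the beginning of the FIFO instead.
 */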
63 static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
64 					  size_t bytes)
65 {
66 	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
67 }
68 
69 static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
70 {
71 	return (atomic_read(&fifo->available) <= bytes) ? false : true;
72 }
73 
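/* Illustrative wrap-around example (sizes chosen only for illustration):
 * with a 16K FIFO, head at 15K and a 2K allocation, gve_tx_alloc_fifo()
 * returns two fragments: iov[0] covers the final 1K before the end of the
 * FIFO and iov[1] covers 1K starting at offset 0; the new head is then
 * re-aligned to the next cacheline boundary.
 */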
74 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
75  * @fifo: FIFO to allocate from
76  * @bytes: Allocation size
77  * @iov: Scatter-gather elements to fill with allocation fragment base/len
78  *
79  * Returns number of valid elements in iov[] or negative on error.
80  *
81  * Allocations from a given FIFO must be externally synchronized, but concurrent
82  * allocations and frees are allowed.
83  */
84 static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
85 			     struct gve_tx_iovec iov[2])
86 {
87 	size_t overflow, padding;
88 	u32 aligned_head;
89 	int nfrags = 0;
90 
91 	if (!bytes)
92 		return 0;
93 
94 	/* This check happens before we know how much padding is needed to
95 	 * align to a cacheline boundary for the payload, but that is fine,
96 	 * because the FIFO head always starts aligned, and the FIFO's boundaries
97 	 * are aligned, so if there is space for the data, there is space for
98 	 * the padding to the next alignment.
99 	 */
100 	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
101 	     "Reached %s when there's not enough space in the fifo", __func__);
102 
103 	nfrags++;
104 
105 	iov[0].iov_offset = fifo->head;
106 	iov[0].iov_len = bytes;
107 	fifo->head += bytes;
108 
109 	if (fifo->head > fifo->size) {
110 		/* If the allocation did not fit in the tail fragment of the
111 		 * FIFO, also use the head fragment.
112 		 */
113 		nfrags++;
114 		overflow = fifo->head - fifo->size;
115 		iov[0].iov_len -= overflow;
116 		iov[1].iov_offset = 0;	/* Start of fifo */
117 		iov[1].iov_len = overflow;
118 
119 		fifo->head = overflow;
120 	}
121 
122 	/* Re-align to a cacheline boundary */
123 	aligned_head = L1_CACHE_ALIGN(fifo->head);
124 	padding = aligned_head - fifo->head;
125 	iov[nfrags - 1].iov_padding = padding;
126 	atomic_sub(bytes + padding, &fifo->available);
127 	fifo->head = aligned_head;
128 
129 	if (fifo->head == fifo->size)
130 		fifo->head = 0;
131 
132 	return nfrags;
133 }
134 
135 /* gve_tx_free_fifo - Return space to Tx FIFO
136  * @fifo: FIFO to return fragments to
137  * @bytes: Bytes to free
138  */
139 static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
140 {
141 	atomic_add(bytes, &fifo->available);
142 }
143 
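/* Reset the FIFO iovecs recorded for a completed buffer and return the
 * number of FIFO bytes (data plus alignment padding) they occupied, so the
 * caller can hand that space back to the FIFO.
 */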
144 static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info)
145 {
146 	size_t space_freed = 0;
147 	int i;
148 
149 	for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
150 		space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
151 		info->iov[i].iov_len = 0;
152 		info->iov[i].iov_padding = 0;
153 	}
154 	return space_freed;
155 }
156 
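/* Reclaim completed XDP Tx descriptors: return any xdp_frames, count bytes
 * and packets for stats, credit completed XSK descriptors back to the pool
 * and give the copied-in FIFO space back.
 */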
157 static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
158 			      u32 to_do)
159 {
160 	struct gve_tx_buffer_state *info;
161 	u32 clean_end = tx->done + to_do;
162 	u64 pkts = 0, bytes = 0;
163 	size_t space_freed = 0;
164 	u32 xsk_complete = 0;
165 	u32 idx;
166 
167 	for (; tx->done < clean_end; tx->done++) {
168 		idx = tx->done & tx->mask;
169 		info = &tx->info[idx];
170 
171 		if (unlikely(!info->xdp.size))
172 			continue;
173 
174 		bytes += info->xdp.size;
175 		pkts++;
176 		xsk_complete += info->xdp.is_xsk;
177 
178 		info->xdp.size = 0;
179 		if (info->xdp_frame) {
180 			xdp_return_frame(info->xdp_frame);
181 			info->xdp_frame = NULL;
182 		}
183 		space_freed += gve_tx_clear_buffer_state(info);
184 	}
185 
186 	gve_tx_free_fifo(&tx->tx_fifo, space_freed);
187 	if (xsk_complete > 0 && tx->xsk_pool)
188 		xsk_tx_completed(tx->xsk_pool, xsk_complete);
189 	u64_stats_update_begin(&tx->statss);
190 	tx->bytes_done += bytes;
191 	tx->pkt_done += pkts;
192 	u64_stats_update_end(&tx->statss);
193 	return pkts;
194 }
195 
196 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
197 			     u32 to_do, bool try_to_wake);
198 
199 void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
200 {
201 	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
202 	struct gve_tx_ring *tx = &priv->tx[idx];
203 
204 	if (!gve_tx_was_added_to_block(priv, idx))
205 		return;
206 
207 	gve_remove_napi(priv, ntfy_idx);
208 	gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
209 	netdev_tx_reset_queue(tx->netdev_txq);
210 	gve_tx_remove_from_block(priv, idx);
211 }
212 
213 static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
214 				 struct gve_tx_alloc_rings_cfg *cfg)
215 {
216 	struct device *hdev = &priv->pdev->dev;
217 	int idx = tx->q_num;
218 	size_t bytes;
219 	u32 slots;
220 
221 	slots = tx->mask + 1;
222 	dma_free_coherent(hdev, sizeof(*tx->q_resources),
223 			  tx->q_resources, tx->q_resources_bus);
224 	tx->q_resources = NULL;
225 
226 	if (!tx->raw_addressing) {
227 		gve_tx_fifo_release(priv, &tx->tx_fifo);
228 		tx->tx_fifo.qpl = NULL;
229 	}
230 
231 	bytes = sizeof(*tx->desc) * slots;
232 	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
233 	tx->desc = NULL;
234 
235 	vfree(tx->info);
236 	tx->info = NULL;
237 
238 	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
239 }
240 
241 void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx)
242 {
243 	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
244 	struct gve_tx_ring *tx = &priv->tx[idx];
245 
246 	gve_tx_add_to_block(priv, idx);
247 
248 	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
249 	gve_add_napi(priv, ntfy_idx, gve_napi_poll);
250 }
251 
252 static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
253 				 struct gve_tx_alloc_rings_cfg *cfg,
254 				 struct gve_tx_ring *tx,
255 				 int idx)
256 {
257 	struct device *hdev = &priv->pdev->dev;
258 	size_t bytes;
259 
260 	/* Make sure everything is zeroed to start */
261 	memset(tx, 0, sizeof(*tx));
262 	spin_lock_init(&tx->clean_lock);
263 	spin_lock_init(&tx->xdp_lock);
264 	tx->q_num = idx;
265 
266 	tx->mask = cfg->ring_size - 1;
267 
268 	/* alloc metadata */
269 	tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
270 	if (!tx->info)
271 		return -ENOMEM;
272 
273 	/* alloc tx queue */
274 	bytes = sizeof(*tx->desc) * cfg->ring_size;
275 	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
276 	if (!tx->desc)
277 		goto abort_with_info;
278 
279 	tx->raw_addressing = cfg->raw_addressing;
280 	tx->dev = hdev;
281 	if (!tx->raw_addressing) {
282 		u32 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
283 
284 		tx->tx_fifo.qpl = &cfg->qpls[qpl_id];
285 		/* map Tx FIFO */
286 		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
287 			goto abort_with_desc;
288 	}
289 
290 	tx->q_resources =
291 		dma_alloc_coherent(hdev,
292 				   sizeof(*tx->q_resources),
293 				   &tx->q_resources_bus,
294 				   GFP_KERNEL);
295 	if (!tx->q_resources)
296 		goto abort_with_fifo;
297 
298 	return 0;
299 
300 abort_with_fifo:
301 	if (!tx->raw_addressing)
302 		gve_tx_fifo_release(priv, &tx->tx_fifo);
303 abort_with_desc:
304 	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
305 	tx->desc = NULL;
306 abort_with_info:
307 	vfree(tx->info);
308 	tx->info = NULL;
309 	return -ENOMEM;
310 }
311 
312 int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
313 			   struct gve_tx_alloc_rings_cfg *cfg)
314 {
315 	struct gve_tx_ring *tx = cfg->tx;
316 	int err = 0;
317 	int i, j;
318 
319 	if (!cfg->raw_addressing && !cfg->qpls) {
320 		netif_err(priv, drv, priv->dev,
321 			  "Cannot alloc QPL ring before allocing QPLs\n");
322 		return -EINVAL;
323 	}
324 
325 	if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
326 		netif_err(priv, drv, priv->dev,
327 			  "Cannot alloc more than the max num of Tx rings\n");
328 		return -EINVAL;
329 	}
330 
331 	if (cfg->start_idx == 0) {
332 		tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
333 			      GFP_KERNEL);
334 		if (!tx)
335 			return -ENOMEM;
336 	} else if (!tx) {
337 		netif_err(priv, drv, priv->dev,
338 			  "Cannot alloc tx rings from a nonzero start idx without tx array\n");
339 		return -EINVAL;
340 	}
341 
342 	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
343 		err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
344 		if (err) {
345 			netif_err(priv, drv, priv->dev,
346 				  "Failed to alloc tx ring=%d: err=%d\n",
347 				  i, err);
348 			goto cleanup;
349 		}
350 	}
351 
352 	cfg->tx = tx;
353 	return 0;
354 
355 cleanup:
356 	for (j = 0; j < i; j++)
357 		gve_tx_free_ring_gqi(priv, &tx[j], cfg);
358 	if (cfg->start_idx == 0)
359 		kvfree(tx);
360 	return err;
361 }
362 
363 void gve_tx_free_rings_gqi(struct gve_priv *priv,
364 			   struct gve_tx_alloc_rings_cfg *cfg)
365 {
366 	struct gve_tx_ring *tx = cfg->tx;
367 	int i;
368 
369 	if (!tx)
370 		return;
371 
372 	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
373 		gve_tx_free_ring_gqi(priv, &tx[i], cfg);
374 
375 	if (cfg->start_idx == 0) {
376 		kvfree(tx);
377 		cfg->tx = NULL;
378 	}
379 }
380 
381 /* gve_tx_avail - Calculates the number of slots available in the ring
382  * @tx: tx ring to check
383  *
384  * Returns the number of slots available
385  *
386  * The capacity of the queue is mask + 1. We don't need to reserve an entry.
387  */
388 static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
389 {
390 	return tx->mask + 1 - (tx->req - tx->done);
391 }
392 
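/* Worst-case FIFO bytes this skb needs on the copy (QPL) path: possible
 * padding to the end of the FIFO so the header is not split, cacheline
 * alignment padding after the header, plus the full skb length.
 */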
393 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
394 					      struct sk_buff *skb)
395 {
396 	int pad_bytes, align_hdr_pad;
397 	int bytes;
398 	int hlen;
399 
400 	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
401 				 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
402 
403 	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
404 						   hlen);
405 	/* We need to take into account the header alignment padding. */
406 	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
407 	bytes = align_hdr_pad + pad_bytes + skb->len;
408 
409 	return bytes;
410 }
411 
412 /* The most descriptors we could need is MAX_SKB_FRAGS + 4:
413  * 1 for each skb frag
414  * 1 for the skb linear portion
415  * 1 for when the tcp hdr needs to be in its own descriptor
416  * 1 if the payload wraps to the beginning of the FIFO
417  * 1 for the metadata descriptor
418  */
419 #define MAX_TX_DESC_NEEDED	(MAX_SKB_FRAGS + 4)
420 static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
421 {
422 	if (info->skb) {
423 		dma_unmap_single(dev, dma_unmap_addr(info, dma),
424 				 dma_unmap_len(info, len),
425 				 DMA_TO_DEVICE);
426 		dma_unmap_len_set(info, len, 0);
427 	} else {
428 		dma_unmap_page(dev, dma_unmap_addr(info, dma),
429 			       dma_unmap_len(info, len),
430 			       DMA_TO_DEVICE);
431 		dma_unmap_len_set(info, len, 0);
432 	}
433 }
434 
435 /* Check if sufficient resources (descriptor ring space, FIFO space) are
436  * available to transmit the given number of bytes.
437  */
438 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
439 {
440 	bool can_alloc = true;
441 
442 	if (!tx->raw_addressing)
443 		can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);
444 
445 	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
446 }
447 
448 static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);
449 
450 /* Stops the queue if the skb cannot be transmitted. */
451 static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
452 			     struct sk_buff *skb)
453 {
454 	int bytes_required = 0;
455 	u32 nic_done;
456 	u32 to_do;
457 	int ret;
458 
459 	if (!tx->raw_addressing)
460 		bytes_required = gve_skb_fifo_bytes_required(tx, skb);
461 
462 	if (likely(gve_can_tx(tx, bytes_required)))
463 		return 0;
464 
465 	ret = -EBUSY;
466 	spin_lock(&tx->clean_lock);
467 	nic_done = gve_tx_load_event_counter(priv, tx);
468 	to_do = nic_done - tx->done;
469 
470 	/* Only try to clean if there is hope for TX */
471 	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
472 		if (to_do > 0) {
473 			to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
474 			gve_clean_tx_done(priv, tx, to_do, false);
475 		}
476 		if (likely(gve_can_tx(tx, bytes_required)))
477 			ret = 0;
478 	}
479 	if (ret) {
480 		/* No space, so stop the queue */
481 		tx->stop_queue++;
482 		netif_tx_stop_queue(tx->netdev_txq);
483 	}
484 	spin_unlock(&tx->clean_lock);
485 
486 	return ret;
487 }
488 
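/* Fill the first (packet) descriptor: checksum/TSO flags, the total number
 * of descriptors used for this packet, the total packet length, and the
 * length and address (a FIFO offset on the copy path, a DMA address on the
 * raw-addressing path) of the first segment.
 */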
489 static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
490 				 u16 csum_offset, u8 ip_summed, bool is_gso,
491 				 int l4_hdr_offset, u32 desc_cnt,
492 				 u16 hlen, u64 addr, u16 pkt_len)
493 {
494 	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
495 	if (is_gso) {
496 		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
497 		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
498 		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
499 	} else if (likely(ip_summed == CHECKSUM_PARTIAL)) {
500 		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
501 		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
502 		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
503 	} else {
504 		pkt_desc->pkt.type_flags = GVE_TXD_STD;
505 		pkt_desc->pkt.l4_csum_offset = 0;
506 		pkt_desc->pkt.l4_hdr_offset = 0;
507 	}
508 	pkt_desc->pkt.desc_cnt = desc_cnt;
509 	pkt_desc->pkt.len = cpu_to_be16(pkt_len);
510 	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
511 	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
512 }
513 
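/* Fill an optional metadata descriptor that passes the skb's L4 hash to the
 * device for path selection.
 */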
514 static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
515 				 struct sk_buff *skb)
516 {
517 	BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));
518 
519 	mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
520 	mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
521 				   GVE_MTD_PATH_HASH_L4;
522 	mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
523 	mtd_desc->mtd.reserved0 = 0;
524 	mtd_desc->mtd.reserved1 = 0;
525 }
526 
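/* Fill a segment descriptor for an additional buffer of the packet; for GSO
 * packets it also carries the L3 header offset and the MSS.
 */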
527 static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
528 				 u16 l3_offset, u16 gso_size,
529 				 bool is_gso_v6, bool is_gso,
530 				 u16 len, u64 addr)
531 {
532 	seg_desc->seg.type_flags = GVE_TXD_SEG;
533 	if (is_gso) {
534 		if (is_gso_v6)
535 			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
536 		seg_desc->seg.l3_offset = l3_offset >> 1;
537 		seg_desc->seg.mss = cpu_to_be16(gso_size);
538 	}
539 	seg_desc->seg.seg_len = cpu_to_be16(len);
540 	seg_desc->seg.seg_addr = cpu_to_be64(addr);
541 }
542 
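/* The FIFO is vmapped over the QPL's pages, so a single iovec may span
 * several pages; sync every page touched by [iov_offset, iov_offset +
 * iov_len) before the device reads it.
 */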
543 static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
544 				    u64 iov_offset, u64 iov_len)
545 {
546 	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
547 	u64 first_page = iov_offset / PAGE_SIZE;
548 	u64 page;
549 
550 	for (page = first_page; page <= last_page; page++)
551 		dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
552 }
553 
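/* Copy-based (QPL) transmit path: the header is copied into the FIFO
 * (padded to the end of the FIFO first if it would otherwise be split) and
 * described by the packet descriptor; the payload is copied into up to two
 * more FIFO fragments, each described by a segment descriptor, with an
 * optional metadata descriptor in between. Returns the number of
 * descriptors used.
 */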
554 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
555 {
556 	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
557 	union gve_tx_desc *pkt_desc, *seg_desc;
558 	struct gve_tx_buffer_state *info;
559 	int mtd_desc_nr = !!skb->l4_hash;
560 	bool is_gso = skb_is_gso(skb);
561 	u32 idx = tx->req & tx->mask;
562 	int payload_iov = 2;
563 	int copy_offset;
564 	u32 next_idx;
565 	int i;
566 
567 	info = &tx->info[idx];
568 	pkt_desc = &tx->desc[idx];
569 
570 	l4_hdr_offset = skb_checksum_start_offset(skb);
571 	/* If the skb is gso, then we want the tcp header alone in the first segment;
572 	 * otherwise we want the minimum required by the gVNIC spec.
573 	 */
574 	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
575 			min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
576 
577 	info->skb = skb;
578 	/* We don't want to split the header, so if necessary, pad to the end
579 	 * of the fifo and then put the header at the beginning of the fifo.
580 	 */
581 	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
582 	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
583 				       &info->iov[0]);
584 	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
585 	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
586 					   &info->iov[payload_iov]);
587 
588 	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
589 			     is_gso, l4_hdr_offset,
590 			     1 + mtd_desc_nr + payload_nfrags, hlen,
591 			     info->iov[hdr_nfrags - 1].iov_offset, skb->len);
592 
593 	skb_copy_bits(skb, 0,
594 		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
595 		      hlen);
596 	gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
597 				info->iov[hdr_nfrags - 1].iov_offset,
598 				info->iov[hdr_nfrags - 1].iov_len);
599 	copy_offset = hlen;
600 
601 	if (mtd_desc_nr) {
602 		next_idx = (tx->req + 1) & tx->mask;
603 		gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
604 	}
605 
606 	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
607 		next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
608 		seg_desc = &tx->desc[next_idx];
609 
610 		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
611 				     skb_shinfo(skb)->gso_size,
612 				     skb_is_gso_v6(skb), is_gso,
613 				     info->iov[i].iov_len,
614 				     info->iov[i].iov_offset);
615 
616 		skb_copy_bits(skb, copy_offset,
617 			      tx->tx_fifo.base + info->iov[i].iov_offset,
618 			      info->iov[i].iov_len);
619 		gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
620 					info->iov[i].iov_offset,
621 					info->iov[i].iov_len);
622 		copy_offset += info->iov[i].iov_len;
623 	}
624 
625 	return 1 + mtd_desc_nr + payload_nfrags;
626 }
627 
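/* Raw-addressing transmit path: nothing is copied into a FIFO; the linear
 * part of the skb and each frag are DMA-mapped and described by their own
 * descriptors. Returns the number of descriptors used, or 0 if the skb was
 * dropped on a DMA mapping error.
 */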
628 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
629 				  struct sk_buff *skb)
630 {
631 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
632 	int hlen, num_descriptors, l4_hdr_offset;
633 	union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
634 	struct gve_tx_buffer_state *info;
635 	int mtd_desc_nr = !!skb->l4_hash;
636 	bool is_gso = skb_is_gso(skb);
637 	u32 idx = tx->req & tx->mask;
638 	u64 addr;
639 	u32 len;
640 	int i;
641 
642 	info = &tx->info[idx];
643 	pkt_desc = &tx->desc[idx];
644 
645 	l4_hdr_offset = skb_checksum_start_offset(skb);
646 	/* If the skb is gso, then we want only up to the tcp header in the first segment
647 	 * to efficiently replicate on each segment; otherwise we want the linear portion
648 	 * of the skb (which will contain the checksum because skb->csum_start and
649 	 * skb->csum_offset are given relative to skb->head) in the first segment.
650 	 */
651 	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
652 	len = skb_headlen(skb);
653 
654 	info->skb = skb;
655 
656 	addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
657 	if (unlikely(dma_mapping_error(tx->dev, addr))) {
658 		tx->dma_mapping_error++;
659 		goto drop;
660 	}
661 	dma_unmap_len_set(info, len, len);
662 	dma_unmap_addr_set(info, dma, addr);
663 
664 	num_descriptors = 1 + shinfo->nr_frags;
665 	if (hlen < len)
666 		num_descriptors++;
667 	if (mtd_desc_nr)
668 		num_descriptors++;
669 
670 	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
671 			     is_gso, l4_hdr_offset,
672 			     num_descriptors, hlen, addr, skb->len);
673 
674 	if (mtd_desc_nr) {
675 		idx = (idx + 1) & tx->mask;
676 		mtd_desc = &tx->desc[idx];
677 		gve_tx_fill_mtd_desc(mtd_desc, skb);
678 	}
679 
680 	if (hlen < len) {
681 		/* For gso the rest of the linear portion of the skb needs to
682 		 * be in its own descriptor.
683 		 */
684 		len -= hlen;
685 		addr += hlen;
686 		idx = (idx + 1) & tx->mask;
687 		seg_desc = &tx->desc[idx];
688 		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
689 				     skb_shinfo(skb)->gso_size,
690 				     skb_is_gso_v6(skb), is_gso, len, addr);
691 	}
692 
693 	for (i = 0; i < shinfo->nr_frags; i++) {
694 		const skb_frag_t *frag = &shinfo->frags[i];
695 
696 		idx = (idx + 1) & tx->mask;
697 		seg_desc = &tx->desc[idx];
698 		len = skb_frag_size(frag);
699 		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
700 		if (unlikely(dma_mapping_error(tx->dev, addr))) {
701 			tx->dma_mapping_error++;
702 			goto unmap_drop;
703 		}
704 		tx->info[idx].skb = NULL;
705 		dma_unmap_len_set(&tx->info[idx], len, len);
706 		dma_unmap_addr_set(&tx->info[idx], dma, addr);
707 
708 		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
709 				     skb_shinfo(skb)->gso_size,
710 				     skb_is_gso_v6(skb), is_gso, len, addr);
711 	}
712 
713 	return num_descriptors;
714 
715 unmap_drop:
716 	i += num_descriptors - shinfo->nr_frags;
717 	while (i--) {
718 		/* Skip metadata descriptor, if set */
719 		if (i == 1 && mtd_desc_nr == 1)
720 			continue;
721 		idx--;
722 		gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
723 	}
724 drop:
725 	tx->dropped_pkt++;
726 	return 0;
727 }
728 
729 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
730 {
731 	struct gve_priv *priv = netdev_priv(dev);
732 	struct gve_tx_ring *tx;
733 	int nsegs;
734 
735 	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
736 	     "skb queue index out of range");
737 	tx = &priv->tx[skb_get_queue_mapping(skb)];
738 	if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
739 		/* We need to ring the txq doorbell -- we have stopped the Tx
740 		 * queue for want of resources, but prior calls to gve_tx()
741 		 * may have added descriptors without ringing the doorbell.
742 		 */
743 
744 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
745 		return NETDEV_TX_BUSY;
746 	}
747 	if (tx->raw_addressing)
748 		nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
749 	else
750 		nsegs = gve_tx_add_skb_copy(priv, tx, skb);
751 
752 	/* If the packet is getting sent, update BQL, timestamp the skb and advance req */
753 	if (nsegs) {
754 		netdev_tx_sent_queue(tx->netdev_txq, skb->len);
755 		skb_tx_timestamp(skb);
756 		tx->req += nsegs;
757 	} else {
758 		dev_kfree_skb_any(skb);
759 	}
760 
761 	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
762 		return NETDEV_TX_OK;
763 
764 	/* Give packets to NIC. Even if this packet failed to send, the doorbell
765 	 * might need to be rung because of xmit_more.
766 	 */
767 	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
768 	return NETDEV_TX_OK;
769 }
770 
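/* Copy an XDP buffer into the Tx FIFO and write its descriptors. If the
 * packet would wrap and the space left before the end of the FIFO is
 * smaller than GVE_GQ_TX_MIN_PKT_DESC_BYTES, that tail space is consumed as
 * padding so the data starts at the beginning of the FIFO rather than in a
 * too-small first fragment. Returns the number of descriptors written.
 */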
771 static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
772 			   void *data, int len, void *frame_p, bool is_xsk)
773 {
774 	int pad, nfrags, ndescs, iovi, offset;
775 	struct gve_tx_buffer_state *info;
776 	u32 reqi = tx->req;
777 
778 	pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
779 	if (pad >= GVE_GQ_TX_MIN_PKT_DESC_BYTES)
780 		pad = 0;
781 	info = &tx->info[reqi & tx->mask];
782 	info->xdp_frame = frame_p;
783 	info->xdp.size = len;
784 	info->xdp.is_xsk = is_xsk;
785 
786 	nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
787 				   &info->iov[0]);
788 	iovi = pad > 0;
789 	ndescs = nfrags - iovi;
790 	offset = 0;
791 
792 	while (iovi < nfrags) {
793 		if (!offset)
794 			gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0,
795 					     CHECKSUM_NONE, false, 0, ndescs,
796 					     info->iov[iovi].iov_len,
797 					     info->iov[iovi].iov_offset, len);
798 		else
799 			gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask],
800 					     0, 0, false, false,
801 					     info->iov[iovi].iov_len,
802 					     info->iov[iovi].iov_offset);
803 
804 		memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset,
805 		       data + offset, info->iov[iovi].iov_len);
806 		gve_dma_sync_for_device(&priv->pdev->dev,
807 					tx->tx_fifo.qpl->page_buses,
808 					info->iov[iovi].iov_offset,
809 					info->iov[iovi].iov_len);
810 		offset += info->iov[iovi].iov_len;
811 		iovi++;
812 		reqi++;
813 	}
814 
815 	return ndescs;
816 }
817 
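/* ndo_xdp_xmit handler: frames are spread across the XDP Tx queues by the
 * sending CPU, serialized per queue with xdp_lock, and the doorbell is only
 * rung when XDP_XMIT_FLUSH is set. Returns the number of frames queued, or
 * an error if none were.
 */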
818 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
819 		 u32 flags)
820 {
821 	struct gve_priv *priv = netdev_priv(dev);
822 	struct gve_tx_ring *tx;
823 	int i, err = 0, qid;
824 
825 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
826 		return -EINVAL;
827 
828 	qid = gve_xdp_tx_queue_id(priv,
829 				  smp_processor_id() % priv->num_xdp_queues);
830 
831 	tx = &priv->tx[qid];
832 
833 	spin_lock(&tx->xdp_lock);
834 	for (i = 0; i < n; i++) {
835 		err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
836 				       frames[i]->len, frames[i]);
837 		if (err)
838 			break;
839 	}
840 
841 	if (flags & XDP_XMIT_FLUSH)
842 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
843 
844 	spin_unlock(&tx->xdp_lock);
845 
846 	u64_stats_update_begin(&tx->statss);
847 	tx->xdp_xmit += n;
848 	tx->xdp_xmit_errors += n - i;
849 	u64_stats_update_end(&tx->statss);
850 
851 	return i ? i : err;
852 }
853 
854 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
855 		     void *data, int len, void *frame_p)
856 {
857 	int nsegs;
858 
859 	if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1))
860 		return -EBUSY;
861 
862 	nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
863 	tx->req += nsegs;
864 
865 	return 0;
866 }
867 
868 #define GVE_TX_START_THRESH	4096
869 
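/* Reclaim up to @to_do completed Tx descriptors: unmap raw-addressing
 * buffers, free skbs, return FIFO space on the copy path, update stats and
 * BQL, and optionally re-wake a stopped queue once enough space is
 * available again.
 */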
870 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
871 			     u32 to_do, bool try_to_wake)
872 {
873 	struct gve_tx_buffer_state *info;
874 	u64 pkts = 0, bytes = 0;
875 	size_t space_freed = 0;
876 	struct sk_buff *skb;
877 	u32 idx;
878 	int j;
879 
880 	for (j = 0; j < to_do; j++) {
881 		idx = tx->done & tx->mask;
882 		netif_info(priv, tx_done, priv->dev,
883 			   "[%d] %s: idx=%d (req=%u done=%u)\n",
884 			   tx->q_num, __func__, idx, tx->req, tx->done);
885 		info = &tx->info[idx];
886 		skb = info->skb;
887 
888 		/* Unmap the buffer */
889 		if (tx->raw_addressing)
890 			gve_tx_unmap_buf(tx->dev, info);
891 		tx->done++;
892 		/* Mark as free */
893 		if (skb) {
894 			info->skb = NULL;
895 			bytes += skb->len;
896 			pkts++;
897 			dev_consume_skb_any(skb);
898 			if (tx->raw_addressing)
899 				continue;
900 			space_freed += gve_tx_clear_buffer_state(info);
901 		}
902 	}
903 
904 	if (!tx->raw_addressing)
905 		gve_tx_free_fifo(&tx->tx_fifo, space_freed);
906 	u64_stats_update_begin(&tx->statss);
907 	tx->bytes_done += bytes;
908 	tx->pkt_done += pkts;
909 	u64_stats_update_end(&tx->statss);
910 	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);
911 
912 	/* start the queue if we've stopped it */
913 #ifndef CONFIG_BQL
914 	/* Make sure that the doorbells are synced */
915 	smp_mb();
916 #endif
917 	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
918 	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
919 		tx->wake_queue++;
920 		netif_tx_wake_queue(tx->netdev_txq);
921 	}
922 
923 	return pkts;
924 }
925 
926 u32 gve_tx_load_event_counter(struct gve_priv *priv,
927 			      struct gve_tx_ring *tx)
928 {
929 	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
930 	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);
931 
932 	return be32_to_cpu(counter);
933 }
934 
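/* Transmit up to @budget packets from the XSK pool, copying each one into
 * the Tx FIFO; the doorbell is rung once at the end if anything was queued.
 */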
935 static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
936 		      int budget)
937 {
938 	struct xdp_desc desc;
939 	int sent = 0, nsegs;
940 	void *data;
941 
942 	spin_lock(&tx->xdp_lock);
943 	while (sent < budget) {
944 		if (!gve_can_tx(tx, GVE_TX_START_THRESH))
945 			goto out;
946 
947 		if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
948 			tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
949 			goto out;
950 		}
951 
952 		data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
953 		nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
954 		tx->req += nsegs;
955 		sent++;
956 	}
957 out:
958 	if (sent > 0) {
959 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
960 		xsk_tx_release(tx->xsk_pool);
961 	}
962 	spin_unlock(&tx->xdp_lock);
963 	return sent;
964 }
965 
966 bool gve_xdp_poll(struct gve_notify_block *block, int budget)
967 {
968 	struct gve_priv *priv = block->priv;
969 	struct gve_tx_ring *tx = block->tx;
970 	u32 nic_done;
971 	bool repoll;
972 	u32 to_do;
973 
974 	/* Find out how much work there is to be done */
975 	nic_done = gve_tx_load_event_counter(priv, tx);
976 	to_do = min_t(u32, (nic_done - tx->done), budget);
977 	gve_clean_xdp_done(priv, tx, to_do);
978 	repoll = nic_done != tx->done;
979 
980 	if (tx->xsk_pool) {
981 		int sent = gve_xsk_tx(priv, tx, budget);
982 
983 		u64_stats_update_begin(&tx->statss);
984 		tx->xdp_xsk_sent += sent;
985 		u64_stats_update_end(&tx->statss);
986 		repoll |= (sent == budget);
987 		if (xsk_uses_need_wakeup(tx->xsk_pool))
988 			xsk_set_tx_need_wakeup(tx->xsk_pool);
989 	}
990 
991 	/* If we still have work we want to repoll */
992 	return repoll;
993 }
994 
995 bool gve_tx_poll(struct gve_notify_block *block, int budget)
996 {
997 	struct gve_priv *priv = block->priv;
998 	struct gve_tx_ring *tx = block->tx;
999 	u32 nic_done;
1000 	u32 to_do;
1001 
1002 	/* If budget is 0, do all the work */
1003 	if (budget == 0)
1004 		budget = INT_MAX;
1005 
1006 	/* The TX path may try to clean completed pkts in order to xmit. To
1007 	 * avoid a cleaning conflict, use spin_lock(); it yields better
1008 	 * concurrency between xmit/clean than netif's lock.
1009 	 */
1010 	spin_lock(&tx->clean_lock);
1011 	/* Find out how much work there is to be done */
1012 	nic_done = gve_tx_load_event_counter(priv, tx);
1013 	to_do = min_t(u32, (nic_done - tx->done), budget);
1014 	gve_clean_tx_done(priv, tx, to_do, true);
1015 	spin_unlock(&tx->clean_lock);
1016 	/* If we still have work we want to repoll */
1017 	return nic_done != tx->done;
1018 }
1019 
1020 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
1021 {
1022 	u32 nic_done = gve_tx_load_event_counter(priv, tx);
1023 
1024 	return nic_done != tx->done;
1025 }
1026