xref: /linux/drivers/net/ethernet/google/gve/gve_tx.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #include "gve.h"
8 #include "gve_adminq.h"
9 #include "gve_utils.h"
10 #include <linux/ip.h>
11 #include <linux/tcp.h>
12 #include <linux/vmalloc.h>
13 #include <linux/skbuff.h>
14 #include <net/xdp_sock_drv.h>
15 
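/* Added note (editorial): ring the queue's doorbell. The doorbell index
 * assigned by the device is stored big-endian in the queue resources, and the
 * value written into the doorbell BAR is big-endian as well.
 */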
16 static inline void gve_tx_put_doorbell(struct gve_priv *priv,
17 				       struct gve_queue_resources *q_resources,
18 				       u32 val)
19 {
20 	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
21 }
22 
23 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid)
24 {
25 	u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
26 	struct gve_tx_ring *tx = &priv->tx[tx_qid];
27 
28 	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
29 }
30 
31 /* gvnic can only transmit from a Registered Segment.
32  * We copy skb payloads into the registered segment before writing Tx
33  * descriptors and ringing the Tx doorbell.
34  *
35  * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
36  * free allocations in the order they were allocated.
37  */
38 
39 static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
40 {
41 	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
42 			  PAGE_KERNEL);
43 	if (unlikely(!fifo->base)) {
44 		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
45 			  fifo->qpl->id);
46 		return -ENOMEM;
47 	}
48 
49 	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
50 	atomic_set(&fifo->available, fifo->size);
51 	fifo->head = 0;
52 	return 0;
53 }
54 
55 static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
56 {
57 	WARN(atomic_read(&fifo->available) != fifo->size,
58 	     "Releasing non-empty fifo");
59 
60 	vunmap(fifo->base);
61 }
62 
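/* Added note (editorial): returns the number of pad bytes needed to push
 * fifo->head to the end of the FIFO so that a fragment of @bytes can start at
 * offset 0, or 0 if the fragment already fits without reaching the FIFO
 * boundary.
 */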
63 static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
64 					  size_t bytes)
65 {
66 	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
67 }
68 
69 static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
70 {
71 	return (atomic_read(&fifo->available) <= bytes) ? false : true;
72 }
73 
74 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
75  * @fifo: FIFO to allocate from
76  * @bytes: Allocation size
77  * @iov: Scatter-gather elements to fill with allocation fragment base/len
78  *
79  * Returns number of valid elements in iov[] or negative on error.
80  *
81  * Allocations from a given FIFO must be externally synchronized but concurrent
82  * allocations and frees are allowed.
83  */
84 static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
85 			     struct gve_tx_iovec iov[2])
86 {
87 	size_t overflow, padding;
88 	u32 aligned_head;
89 	int nfrags = 0;
90 
91 	if (!bytes)
92 		return 0;
93 
94 	/* This check happens before we know how much padding is needed to
95 	 * align to a cacheline boundary for the payload, but that is fine,
96 	 * because the FIFO head always starts aligned, and the FIFO's boundaries
97 	 * are aligned, so if there is space for the data, there is space for
98 	 * the padding to the next alignment.
99 	 */
100 	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
101 	     "Reached %s when there's not enough space in the fifo", __func__);
102 
103 	nfrags++;
104 
105 	iov[0].iov_offset = fifo->head;
106 	iov[0].iov_len = bytes;
107 	fifo->head += bytes;
108 
109 	if (fifo->head > fifo->size) {
110 		/* If the allocation did not fit in the tail fragment of the
111 		 * FIFO, also use the head fragment.
112 		 */
113 		nfrags++;
114 		overflow = fifo->head - fifo->size;
115 		iov[0].iov_len -= overflow;
116 		iov[1].iov_offset = 0;	/* Start of fifo */
117 		iov[1].iov_len = overflow;
118 
119 		fifo->head = overflow;
120 	}
121 
122 	/* Re-align to a cacheline boundary */
123 	aligned_head = L1_CACHE_ALIGN(fifo->head);
124 	padding = aligned_head - fifo->head;
125 	iov[nfrags - 1].iov_padding = padding;
126 	atomic_sub(bytes + padding, &fifo->available);
127 	fifo->head = aligned_head;
128 
129 	if (fifo->head == fifo->size)
130 		fifo->head = 0;
131 
132 	return nfrags;
133 }
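/* Added note (editorial): worked example for the wrap case above, with
 * hypothetical sizes and 64-byte cachelines: fifo->size = 16384,
 * fifo->head = 16000 and bytes = 1000 gives iov[0] at offsets 16000..16383
 * (384 bytes), iov[1] at offsets 0..615 (616 bytes), nfrags = 2 and a new
 * head of L1_CACHE_ALIGN(616) = 640.
 */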
134 
135 /* gve_tx_free_fifo - Return space to Tx FIFO
136  * @fifo: FIFO to return fragments to
137  * @bytes: Bytes to free
138  */
139 static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
140 {
141 	atomic_add(bytes, &fifo->available);
142 }
143 
144 static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info)
145 {
146 	size_t space_freed = 0;
147 	int i;
148 
149 	for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
150 		space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
151 		info->iov[i].iov_len = 0;
152 		info->iov[i].iov_padding = 0;
153 	}
154 	return space_freed;
155 }
156 
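/* Added note (editorial): reclaims @to_do completed XDP/XSK slots. Slots with
 * a zero xdp.size carry no packet and only advance the done counter; for
 * packet slots, redirected frames are returned with xdp_return_frame(), their
 * FIFO space is freed, and XSK completions are reported back to the pool.
 */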
157 static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
158 			      u32 to_do)
159 {
160 	struct gve_tx_buffer_state *info;
161 	u64 pkts = 0, bytes = 0;
162 	size_t space_freed = 0;
163 	u32 xsk_complete = 0;
164 	u32 idx;
165 	int i;
166 
167 	for (i = 0; i < to_do; i++) {
168 		idx = tx->done & tx->mask;
169 		info = &tx->info[idx];
170 		tx->done++;
171 
172 		if (unlikely(!info->xdp.size))
173 			continue;
174 
175 		bytes += info->xdp.size;
176 		pkts++;
177 		xsk_complete += info->xdp.is_xsk;
178 
179 		info->xdp.size = 0;
180 		if (info->xdp_frame) {
181 			xdp_return_frame(info->xdp_frame);
182 			info->xdp_frame = NULL;
183 		}
184 		space_freed += gve_tx_clear_buffer_state(info);
185 	}
186 
187 	gve_tx_free_fifo(&tx->tx_fifo, space_freed);
188 	if (xsk_complete > 0 && tx->xsk_pool)
189 		xsk_tx_completed(tx->xsk_pool, xsk_complete);
190 	u64_stats_update_begin(&tx->statss);
191 	tx->bytes_done += bytes;
192 	tx->pkt_done += pkts;
193 	u64_stats_update_end(&tx->statss);
194 	return pkts;
195 }
196 
197 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
198 			     u32 to_do, bool try_to_wake);
199 
200 void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
201 {
202 	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
203 	struct gve_tx_ring *tx = &priv->tx[idx];
204 
205 	if (!gve_tx_was_added_to_block(priv, idx))
206 		return;
207 
208 	gve_remove_napi(priv, ntfy_idx);
209 	if (tx->q_num < priv->tx_cfg.num_queues)
210 		gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
211 	else
212 		gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
213 	netdev_tx_reset_queue(tx->netdev_txq);
214 	gve_tx_remove_from_block(priv, idx);
215 }
216 
217 static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
218 				 struct gve_tx_alloc_rings_cfg *cfg)
219 {
220 	struct device *hdev = &priv->pdev->dev;
221 	int idx = tx->q_num;
222 	size_t bytes;
223 	u32 qpl_id;
224 	u32 slots;
225 
226 	slots = tx->mask + 1;
227 	dma_free_coherent(hdev, sizeof(*tx->q_resources),
228 			  tx->q_resources, tx->q_resources_bus);
229 	tx->q_resources = NULL;
230 
231 	if (tx->tx_fifo.qpl) {
232 		if (tx->tx_fifo.base)
233 			gve_tx_fifo_release(priv, &tx->tx_fifo);
234 
235 		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
236 		gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
237 		tx->tx_fifo.qpl = NULL;
238 	}
239 
240 	bytes = sizeof(*tx->desc) * slots;
241 	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
242 	tx->desc = NULL;
243 
244 	vfree(tx->info);
245 	tx->info = NULL;
246 
247 	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
248 }
249 
250 void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx)
251 {
252 	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
253 	struct gve_tx_ring *tx = &priv->tx[idx];
254 
255 	gve_tx_add_to_block(priv, idx);
256 
257 	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
258 	gve_add_napi(priv, ntfy_idx, gve_napi_poll);
259 }
260 
261 static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
262 				 struct gve_tx_alloc_rings_cfg *cfg,
263 				 struct gve_tx_ring *tx,
264 				 int idx)
265 {
266 	struct device *hdev = &priv->pdev->dev;
267 	int qpl_page_cnt;
268 	u32 qpl_id = 0;
269 	size_t bytes;
270 
271 	/* Make sure everything is zeroed to start */
272 	memset(tx, 0, sizeof(*tx));
273 	spin_lock_init(&tx->clean_lock);
274 	spin_lock_init(&tx->xdp_lock);
275 	tx->q_num = idx;
276 
277 	tx->mask = cfg->ring_size - 1;
278 
279 	/* alloc metadata */
280 	tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
281 	if (!tx->info)
282 		return -ENOMEM;
283 
284 	/* alloc tx queue */
285 	bytes = sizeof(*tx->desc) * cfg->ring_size;
286 	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
287 	if (!tx->desc)
288 		goto abort_with_info;
289 
290 	tx->raw_addressing = cfg->raw_addressing;
291 	tx->dev = hdev;
292 	if (!tx->raw_addressing) {
293 		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
294 		qpl_page_cnt = priv->tx_pages_per_qpl;
295 
296 		tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
297 							    qpl_page_cnt);
298 		if (!tx->tx_fifo.qpl)
299 			goto abort_with_desc;
300 
301 		/* map Tx FIFO */
302 		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
303 			goto abort_with_qpl;
304 	}
305 
306 	tx->q_resources =
307 		dma_alloc_coherent(hdev,
308 				   sizeof(*tx->q_resources),
309 				   &tx->q_resources_bus,
310 				   GFP_KERNEL);
311 	if (!tx->q_resources)
312 		goto abort_with_fifo;
313 
314 	return 0;
315 
316 abort_with_fifo:
317 	if (!tx->raw_addressing)
318 		gve_tx_fifo_release(priv, &tx->tx_fifo);
319 abort_with_qpl:
320 	if (!tx->raw_addressing) {
321 		gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
322 		tx->tx_fifo.qpl = NULL;
323 	}
324 abort_with_desc:
325 	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
326 	tx->desc = NULL;
327 abort_with_info:
328 	vfree(tx->info);
329 	tx->info = NULL;
330 	return -ENOMEM;
331 }
332 
333 int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
334 			   struct gve_tx_alloc_rings_cfg *cfg)
335 {
336 	struct gve_tx_ring *tx = cfg->tx;
337 	int total_queues;
338 	int err = 0;
339 	int i, j;
340 
341 	total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
342 	if (total_queues > cfg->qcfg->max_queues) {
343 		netif_err(priv, drv, priv->dev,
344 			  "Cannot alloc more than the max num of Tx rings\n");
345 		return -EINVAL;
346 	}
347 
348 	tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
349 		      GFP_KERNEL);
350 	if (!tx)
351 		return -ENOMEM;
352 
353 	for (i = 0; i < total_queues; i++) {
354 		err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
355 		if (err) {
356 			netif_err(priv, drv, priv->dev,
357 				  "Failed to alloc tx ring=%d: err=%d\n",
358 				  i, err);
359 			goto cleanup;
360 		}
361 	}
362 
363 	cfg->tx = tx;
364 	return 0;
365 
366 cleanup:
367 	for (j = 0; j < i; j++)
368 		gve_tx_free_ring_gqi(priv, &tx[j], cfg);
369 	kvfree(tx);
370 	return err;
371 }
372 
373 void gve_tx_free_rings_gqi(struct gve_priv *priv,
374 			   struct gve_tx_alloc_rings_cfg *cfg)
375 {
376 	struct gve_tx_ring *tx = cfg->tx;
377 	int i;
378 
379 	if (!tx)
380 		return;
381 
382 	for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
383 		gve_tx_free_ring_gqi(priv, &tx[i], cfg);
384 
385 	kvfree(tx);
386 	cfg->tx = NULL;
387 }
388 
389 /* gve_tx_avail - Calculates the number of slots available in the ring
390  * @tx: tx ring to check
391  *
392  * Returns the number of slots available
393  *
394  * The capacity of the queue is mask + 1. We don't need to reserve an entry.
395  **/
396 static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
397 {
398 	return tx->mask + 1 - (tx->req - tx->done);
399 }
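/* Added note (editorial): tx->req and tx->done are free-running counters
 * (masked with tx->mask when used as indices), so the subtraction above gives
 * the number of in-flight descriptors even across wrap-around.
 */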
400 
401 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
402 					      struct sk_buff *skb)
403 {
404 	int pad_bytes, align_hdr_pad;
405 	int bytes;
406 	int hlen;
407 
408 	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
409 				 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
410 
411 	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
412 						   hlen);
413 	/* We need to take into account the header alignment padding. */
414 	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
415 	bytes = align_hdr_pad + pad_bytes + skb->len;
416 
417 	return bytes;
418 }
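/* Added note (editorial): the estimate above is the header's
 * cacheline-alignment padding, plus any end-of-FIFO padding needed to keep
 * the header contiguous, plus the full skb->len that will be copied into the
 * FIFO; gve_can_tx() compares it against the free FIFO space.
 */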
419 
420 /* The most descriptors we could need is MAX_SKB_FRAGS + 4 :
421  * 1 for each skb frag
422  * 1 for the skb linear portion
423  * 1 for when tcp hdr needs to be in separate descriptor
424  * 1 if the payload wraps to the beginning of the FIFO
425  * 1 for metadata descriptor
426  */
427 #define MAX_TX_DESC_NEEDED	(MAX_SKB_FRAGS + 4)
428 static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
429 {
430 	if (info->skb) {
431 		dma_unmap_single(dev, dma_unmap_addr(info, dma),
432 				 dma_unmap_len(info, len),
433 				 DMA_TO_DEVICE);
434 		dma_unmap_len_set(info, len, 0);
435 	} else {
436 		dma_unmap_page(dev, dma_unmap_addr(info, dma),
437 			       dma_unmap_len(info, len),
438 			       DMA_TO_DEVICE);
439 		dma_unmap_len_set(info, len, 0);
440 	}
441 }
442 
443 /* Check if sufficient resources (descriptor ring space, FIFO space) are
444  * available to transmit the given number of bytes.
445  */
446 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
447 {
448 	bool can_alloc = true;
449 
450 	if (!tx->raw_addressing)
451 		can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);
452 
453 	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
454 }
455 
456 static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);
457 
458 /* Stops the queue if the skb cannot be transmitted. */
459 static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
460 			     struct sk_buff *skb)
461 {
462 	int bytes_required = 0;
463 	u32 nic_done;
464 	u32 to_do;
465 	int ret;
466 
467 	if (!tx->raw_addressing)
468 		bytes_required = gve_skb_fifo_bytes_required(tx, skb);
469 
470 	if (likely(gve_can_tx(tx, bytes_required)))
471 		return 0;
472 
473 	ret = -EBUSY;
474 	spin_lock(&tx->clean_lock);
475 	nic_done = gve_tx_load_event_counter(priv, tx);
476 	to_do = nic_done - tx->done;
477 
478 	/* Only try to clean if there is hope for TX */
479 	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
480 		if (to_do > 0) {
481 			to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
482 			gve_clean_tx_done(priv, tx, to_do, false);
483 		}
484 		if (likely(gve_can_tx(tx, bytes_required)))
485 			ret = 0;
486 	}
487 	if (ret) {
488 		/* No space, so stop the queue */
489 		tx->stop_queue++;
490 		netif_tx_stop_queue(tx->netdev_txq);
491 	}
492 	spin_unlock(&tx->clean_lock);
493 
494 	return ret;
495 }
496 
497 static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
498 				 u16 csum_offset, u8 ip_summed, bool is_gso,
499 				 int l4_hdr_offset, u32 desc_cnt,
500 				 u16 hlen, u64 addr, u16 pkt_len)
501 {
502 	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
503 	if (is_gso) {
504 		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
505 		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
506 		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
507 	} else if (likely(ip_summed == CHECKSUM_PARTIAL)) {
508 		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
509 		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
510 		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
511 	} else {
512 		pkt_desc->pkt.type_flags = GVE_TXD_STD;
513 		pkt_desc->pkt.l4_csum_offset = 0;
514 		pkt_desc->pkt.l4_hdr_offset = 0;
515 	}
516 	pkt_desc->pkt.desc_cnt = desc_cnt;
517 	pkt_desc->pkt.len = cpu_to_be16(pkt_len);
518 	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
519 	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
520 }
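/* Added note (editorial): in the descriptor written above, desc_cnt is the
 * total number of descriptors (packet + optional metadata + segments) used by
 * the packet, len is the full packet length, and seg_len/seg_addr cover only
 * the first buffer; the remaining buffers are described by the segment
 * descriptors that follow.
 */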
521 
522 static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
523 				 struct sk_buff *skb)
524 {
525 	BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));
526 
527 	mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
528 	mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
529 				   GVE_MTD_PATH_HASH_L4;
530 	mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
531 	mtd_desc->mtd.reserved0 = 0;
532 	mtd_desc->mtd.reserved1 = 0;
533 }
534 
535 static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
536 				 u16 l3_offset, u16 gso_size,
537 				 bool is_gso_v6, bool is_gso,
538 				 u16 len, u64 addr)
539 {
540 	seg_desc->seg.type_flags = GVE_TXD_SEG;
541 	if (is_gso) {
542 		if (is_gso_v6)
543 			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
544 		seg_desc->seg.l3_offset = l3_offset >> 1;
545 		seg_desc->seg.mss = cpu_to_be16(gso_size);
546 	}
547 	seg_desc->seg.seg_len = cpu_to_be16(len);
548 	seg_desc->seg.seg_addr = cpu_to_be64(addr);
549 }
550 
551 static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
552 				    u64 iov_offset, u64 iov_len)
553 {
554 	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
555 	u64 first_page = iov_offset / PAGE_SIZE;
556 	u64 page;
557 
558 	for (page = first_page; page <= last_page; page++)
559 		dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
560 }
561 
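/* Added note (editorial): copy-mode transmit (QPL). The skb is copied into
 * the Tx FIFO and described by one packet descriptor, an optional metadata
 * descriptor and one segment descriptor per payload fragment. info->iov[0..1]
 * hold the (possibly padded) header allocation and iov[2..3] hold the
 * payload, which is why payload_iov starts at 2; the header always ends up
 * contiguous in iov[hdr_nfrags - 1]. Returns the number of descriptors used.
 */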
562 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
563 {
564 	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
565 	union gve_tx_desc *pkt_desc, *seg_desc;
566 	struct gve_tx_buffer_state *info;
567 	int mtd_desc_nr = !!skb->l4_hash;
568 	bool is_gso = skb_is_gso(skb);
569 	u32 idx = tx->req & tx->mask;
570 	int payload_iov = 2;
571 	int copy_offset;
572 	u32 next_idx;
573 	int i;
574 
575 	info = &tx->info[idx];
576 	pkt_desc = &tx->desc[idx];
577 
578 	l4_hdr_offset = skb_checksum_start_offset(skb);
579 	/* If the skb is gso, then we want the tcp header alone in the first segment;
580 	 * otherwise we want the minimum required by the gVNIC spec.
581 	 */
582 	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
583 			min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
584 
585 	info->skb =  skb;
586 	/* We don't want to split the header, so if necessary, pad to the end
587 	 * of the fifo and then put the header at the beginning of the fifo.
588 	 */
589 	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
590 	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
591 				       &info->iov[0]);
592 	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
593 	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
594 					   &info->iov[payload_iov]);
595 
596 	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
597 			     is_gso, l4_hdr_offset,
598 			     1 + mtd_desc_nr + payload_nfrags, hlen,
599 			     info->iov[hdr_nfrags - 1].iov_offset, skb->len);
600 
601 	skb_copy_bits(skb, 0,
602 		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
603 		      hlen);
604 	gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
605 				info->iov[hdr_nfrags - 1].iov_offset,
606 				info->iov[hdr_nfrags - 1].iov_len);
607 	copy_offset = hlen;
608 
609 	if (mtd_desc_nr) {
610 		next_idx = (tx->req + 1) & tx->mask;
611 		gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
612 	}
613 
614 	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
615 		next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
616 		seg_desc = &tx->desc[next_idx];
617 
618 		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
619 				     skb_shinfo(skb)->gso_size,
620 				     skb_is_gso_v6(skb), is_gso,
621 				     info->iov[i].iov_len,
622 				     info->iov[i].iov_offset);
623 
624 		skb_copy_bits(skb, copy_offset,
625 			      tx->tx_fifo.base + info->iov[i].iov_offset,
626 			      info->iov[i].iov_len);
627 		gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
628 					info->iov[i].iov_offset,
629 					info->iov[i].iov_len);
630 		copy_offset += info->iov[i].iov_len;
631 	}
632 
633 	return 1 + mtd_desc_nr + payload_nfrags;
634 }
635 
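/* Added note (editorial): raw-addressing transmit. The skb is DMA-mapped in
 * place and described by one packet descriptor for the linear head, an
 * optional metadata descriptor, an extra segment descriptor when the GSO
 * header is shorter than the linear area, and one segment descriptor per
 * frag. Returns 0 on a DMA mapping error; the caller then frees the skb.
 */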
636 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
637 				  struct sk_buff *skb)
638 {
639 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
640 	int hlen, num_descriptors, l4_hdr_offset;
641 	union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
642 	struct gve_tx_buffer_state *info;
643 	int mtd_desc_nr = !!skb->l4_hash;
644 	bool is_gso = skb_is_gso(skb);
645 	u32 idx = tx->req & tx->mask;
646 	u64 addr;
647 	u32 len;
648 	int i;
649 
650 	info = &tx->info[idx];
651 	pkt_desc = &tx->desc[idx];
652 
653 	l4_hdr_offset = skb_checksum_start_offset(skb);
654 	/* If the skb is gso, then we want only up to the tcp header in the first segment
655 	 * to efficiently replicate on each segment; otherwise we want the linear portion
656 	 * of the skb (which will contain the checksum because skb->csum_start and
657 	 * skb->csum_offset are given relative to skb->head) in the first segment.
658 	 */
659 	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
660 	len = skb_headlen(skb);
661 
662 	info->skb =  skb;
663 
664 	addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
665 	if (unlikely(dma_mapping_error(tx->dev, addr))) {
666 		tx->dma_mapping_error++;
667 		goto drop;
668 	}
669 	dma_unmap_len_set(info, len, len);
670 	dma_unmap_addr_set(info, dma, addr);
671 
672 	num_descriptors = 1 + shinfo->nr_frags;
673 	if (hlen < len)
674 		num_descriptors++;
675 	if (mtd_desc_nr)
676 		num_descriptors++;
677 
678 	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
679 			     is_gso, l4_hdr_offset,
680 			     num_descriptors, hlen, addr, skb->len);
681 
682 	if (mtd_desc_nr) {
683 		idx = (idx + 1) & tx->mask;
684 		mtd_desc = &tx->desc[idx];
685 		gve_tx_fill_mtd_desc(mtd_desc, skb);
686 	}
687 
688 	if (hlen < len) {
689 		/* For gso the rest of the linear portion of the skb needs to
690 		 * be in its own descriptor.
691 		 */
692 		len -= hlen;
693 		addr += hlen;
694 		idx = (idx + 1) & tx->mask;
695 		seg_desc = &tx->desc[idx];
696 		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
697 				     skb_shinfo(skb)->gso_size,
698 				     skb_is_gso_v6(skb), is_gso, len, addr);
699 	}
700 
701 	for (i = 0; i < shinfo->nr_frags; i++) {
702 		const skb_frag_t *frag = &shinfo->frags[i];
703 
704 		idx = (idx + 1) & tx->mask;
705 		seg_desc = &tx->desc[idx];
706 		len = skb_frag_size(frag);
707 		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
708 		if (unlikely(dma_mapping_error(tx->dev, addr))) {
709 			tx->dma_mapping_error++;
710 			goto unmap_drop;
711 		}
712 		tx->info[idx].skb = NULL;
713 		dma_unmap_len_set(&tx->info[idx], len, len);
714 		dma_unmap_addr_set(&tx->info[idx], dma, addr);
715 
716 		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
717 				     skb_shinfo(skb)->gso_size,
718 				     skb_is_gso_v6(skb), is_gso, len, addr);
719 	}
720 
721 	return num_descriptors;
722 
723 unmap_drop:
724 	i += num_descriptors - shinfo->nr_frags;
725 	while (i--) {
726 		/* Skip metadata descriptor, if set */
727 		if (i == 1 && mtd_desc_nr == 1)
728 			continue;
729 		idx--;
730 		gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
731 	}
732 drop:
733 	tx->dropped_pkt++;
734 	return 0;
735 }
736 
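/* Added note (editorial): ndo_start_xmit handler for the GQI queue format. On
 * a resource shortage the queue is stopped and NETDEV_TX_BUSY is returned
 * after ringing the doorbell for descriptors posted by earlier calls;
 * otherwise the skb is posted and the doorbell write is deferred when
 * xmit_more indicates more packets are coming.
 */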
737 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
738 {
739 	struct gve_priv *priv = netdev_priv(dev);
740 	struct gve_tx_ring *tx;
741 	int nsegs;
742 
743 	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
744 	     "skb queue index out of range");
745 	tx = &priv->tx[skb_get_queue_mapping(skb)];
746 	if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
747 		/* We need to ring the txq doorbell -- we have stopped the Tx
748 		 * queue for want of resources, but prior calls to gve_tx()
749 		 * may have added descriptors without ringing the doorbell.
750 		 */
751 
752 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
753 		return NETDEV_TX_BUSY;
754 	}
755 	if (tx->raw_addressing)
756 		nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
757 	else
758 		nsegs = gve_tx_add_skb_copy(priv, tx, skb);
759 
760 	/* If the packet is getting sent, update BQL accounting and timestamp the skb */
761 	if (nsegs) {
762 		netdev_tx_sent_queue(tx->netdev_txq, skb->len);
763 		skb_tx_timestamp(skb);
764 		tx->req += nsegs;
765 	} else {
766 		dev_kfree_skb_any(skb);
767 	}
768 
769 	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
770 		return NETDEV_TX_OK;
771 
772 	/* Give packets to NIC. Even if this packet failed to send, the doorbell
773 	 * might need to be rung because of xmit_more.
774 	 */
775 	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
776 	return NETDEV_TX_OK;
777 }
778 
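/* Added note (editorial): copies an XDP frame (or XSK payload) into the Tx
 * FIFO and writes its descriptors. When the space left before the end of the
 * FIFO is smaller than GVE_GQ_TX_MIN_PKT_DESC_BYTES, it is consumed as
 * padding so the data starts at offset 0 instead of being split; the pad
 * fragment takes FIFO space but no descriptor, hence ndescs = nfrags - iovi.
 */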
779 static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
780 			   void *data, int len, void *frame_p, bool is_xsk)
781 {
782 	int pad, nfrags, ndescs, iovi, offset;
783 	struct gve_tx_buffer_state *info;
784 	u32 reqi = tx->req;
785 
786 	pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
787 	if (pad >= GVE_GQ_TX_MIN_PKT_DESC_BYTES)
788 		pad = 0;
789 	info = &tx->info[reqi & tx->mask];
790 	info->xdp_frame = frame_p;
791 	info->xdp.size = len;
792 	info->xdp.is_xsk = is_xsk;
793 
794 	nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
795 				   &info->iov[0]);
796 	iovi = pad > 0;
797 	ndescs = nfrags - iovi;
798 	offset = 0;
799 
800 	while (iovi < nfrags) {
801 		if (!offset)
802 			gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0,
803 					     CHECKSUM_NONE, false, 0, ndescs,
804 					     info->iov[iovi].iov_len,
805 					     info->iov[iovi].iov_offset, len);
806 		else
807 			gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask],
808 					     0, 0, false, false,
809 					     info->iov[iovi].iov_len,
810 					     info->iov[iovi].iov_offset);
811 
812 		memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset,
813 		       data + offset, info->iov[iovi].iov_len);
814 		gve_dma_sync_for_device(&priv->pdev->dev,
815 					tx->tx_fifo.qpl->page_buses,
816 					info->iov[iovi].iov_offset,
817 					info->iov[iovi].iov_len);
818 		offset += info->iov[iovi].iov_len;
819 		iovi++;
820 		reqi++;
821 	}
822 
823 	return ndescs;
824 }
825 
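/* Added note (editorial): ndo_xdp_xmit handler. Frames are spread across the
 * XDP Tx queues by sending CPU; all @n frames are counted as xdp_xmit and the
 * shortfall (n - i) as xdp_xmit_errors. The return value is the number of
 * frames queued, or the first error if none were.
 */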
826 int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
827 		 u32 flags)
828 {
829 	struct gve_priv *priv = netdev_priv(dev);
830 	struct gve_tx_ring *tx;
831 	int i, err = 0, qid;
832 
833 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
834 		return -EINVAL;
835 
836 	if (!gve_get_napi_enabled(priv))
837 		return -ENETDOWN;
838 
839 	qid = gve_xdp_tx_queue_id(priv,
840 				  smp_processor_id() % priv->tx_cfg.num_xdp_queues);
841 
842 	tx = &priv->tx[qid];
843 
844 	spin_lock(&tx->xdp_lock);
845 	for (i = 0; i < n; i++) {
846 		err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
847 				       frames[i]->len, frames[i]);
848 		if (err)
849 			break;
850 	}
851 
852 	if (flags & XDP_XMIT_FLUSH)
853 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
854 
855 	spin_unlock(&tx->xdp_lock);
856 
857 	u64_stats_update_begin(&tx->statss);
858 	tx->xdp_xmit += n;
859 	tx->xdp_xmit_errors += n - i;
860 	u64_stats_update_end(&tx->statss);
861 
862 	return i ? i : err;
863 }
864 
865 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
866 		     void *data, int len, void *frame_p)
867 {
868 	int nsegs;
869 
870 	if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1))
871 		return -EBUSY;
872 
873 	nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
874 	tx->req += nsegs;
875 
876 	return 0;
877 }
878 
879 #define GVE_TX_START_THRESH	4096
880 
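/* Added note (editorial): reclaims up to @to_do completed slots, unmapping
 * raw-addressing buffers or accounting FIFO space to be returned, freeing the
 * skbs and crediting BQL, and, when @try_to_wake is set, restarting a stopped
 * queue once gve_can_tx() reports room for GVE_TX_START_THRESH bytes.
 */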
881 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
882 			     u32 to_do, bool try_to_wake)
883 {
884 	struct gve_tx_buffer_state *info;
885 	u64 pkts = 0, bytes = 0;
886 	size_t space_freed = 0;
887 	struct sk_buff *skb;
888 	u32 idx;
889 	int j;
890 
891 	for (j = 0; j < to_do; j++) {
892 		idx = tx->done & tx->mask;
893 		netif_info(priv, tx_done, priv->dev,
894 			   "[%d] %s: idx=%d (req=%u done=%u)\n",
895 			   tx->q_num, __func__, idx, tx->req, tx->done);
896 		info = &tx->info[idx];
897 		skb = info->skb;
898 
899 		/* Unmap the buffer */
900 		if (tx->raw_addressing)
901 			gve_tx_unmap_buf(tx->dev, info);
902 		tx->done++;
903 		/* Mark as free */
904 		if (skb) {
905 			info->skb = NULL;
906 			bytes += skb->len;
907 			pkts++;
908 			dev_consume_skb_any(skb);
909 			if (tx->raw_addressing)
910 				continue;
911 			space_freed += gve_tx_clear_buffer_state(info);
912 		}
913 	}
914 
915 	if (!tx->raw_addressing)
916 		gve_tx_free_fifo(&tx->tx_fifo, space_freed);
917 	u64_stats_update_begin(&tx->statss);
918 	tx->bytes_done += bytes;
919 	tx->pkt_done += pkts;
920 	u64_stats_update_end(&tx->statss);
921 	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);
922 
923 	/* start the queue if we've stopped it */
924 #ifndef CONFIG_BQL
925 	/* Make sure that the doorbells are synced */
926 	smp_mb();
927 #endif
928 	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
929 	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
930 		tx->wake_queue++;
931 		netif_tx_wake_queue(tx->netdev_txq);
932 	}
933 
934 	return pkts;
935 }
936 
937 u32 gve_tx_load_event_counter(struct gve_priv *priv,
938 			      struct gve_tx_ring *tx)
939 {
940 	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
941 	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);
942 
943 	return be32_to_cpu(counter);
944 }
945 
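/* Added note (editorial): pulls up to @budget descriptors from the XSK Tx
 * ring, copies each payload into the Tx FIFO via gve_tx_fill_xdp(), and rings
 * the doorbell once for the whole batch. tx->xdp_lock serializes this against
 * gve_xdp_xmit().
 */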
946 static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
947 		      int budget)
948 {
949 	struct xdp_desc desc;
950 	int sent = 0, nsegs;
951 	void *data;
952 
953 	spin_lock(&tx->xdp_lock);
954 	while (sent < budget) {
955 		if (!gve_can_tx(tx, GVE_TX_START_THRESH) ||
956 		    !xsk_tx_peek_desc(tx->xsk_pool, &desc))
957 			goto out;
958 
959 		data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
960 		nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
961 		tx->req += nsegs;
962 		sent++;
963 	}
964 out:
965 	if (sent > 0) {
966 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
967 		xsk_tx_release(tx->xsk_pool);
968 	}
969 	spin_unlock(&tx->xdp_lock);
970 	return sent;
971 }
972 
973 int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
974 {
975 	struct gve_rx_ring *rx = rx_block->rx;
976 	struct gve_priv *priv = rx->gve;
977 	struct gve_tx_ring *tx;
978 	int sent = 0;
979 
980 	tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
981 	if (tx->xsk_pool) {
982 		sent = gve_xsk_tx(priv, tx, budget);
983 
984 		u64_stats_update_begin(&tx->statss);
985 		tx->xdp_xsk_sent += sent;
986 		u64_stats_update_end(&tx->statss);
987 		if (xsk_uses_need_wakeup(tx->xsk_pool))
988 			xsk_set_tx_need_wakeup(tx->xsk_pool);
989 	}
990 
991 	return sent;
992 }
993 
994 bool gve_xdp_poll(struct gve_notify_block *block, int budget)
995 {
996 	struct gve_priv *priv = block->priv;
997 	struct gve_tx_ring *tx = block->tx;
998 	u32 nic_done;
999 	u32 to_do;
1000 
1001 	/* Find out how much work there is to be done */
1002 	nic_done = gve_tx_load_event_counter(priv, tx);
1003 	to_do = min_t(u32, (nic_done - tx->done), budget);
1004 	gve_clean_xdp_done(priv, tx, to_do);
1005 
1006 	/* If we still have work we want to repoll */
1007 	return nic_done != tx->done;
1008 }
1009 
1010 bool gve_tx_poll(struct gve_notify_block *block, int budget)
1011 {
1012 	struct gve_priv *priv = block->priv;
1013 	struct gve_tx_ring *tx = block->tx;
1014 	u32 nic_done;
1015 	u32 to_do;
1016 
1017 	/* If budget is 0, do all the work */
1018 	if (budget == 0)
1019 		budget = INT_MAX;
1020 
1021 	/* The TX path may try to clean completed pkts in order to xmit; to avoid
1022 	 * a cleaning conflict, use spin_lock(), which yields better concurrency
1023 	 * between xmit/clean than netif's lock.
1024 	 */
1025 	spin_lock(&tx->clean_lock);
1026 	/* Find out how much work there is to be done */
1027 	nic_done = gve_tx_load_event_counter(priv, tx);
1028 	to_do = min_t(u32, (nic_done - tx->done), budget);
1029 	gve_clean_tx_done(priv, tx, to_do, true);
1030 	spin_unlock(&tx->clean_lock);
1031 	/* If we still have work we want to repoll */
1032 	return nic_done != tx->done;
1033 }
1034 
1035 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
1036 {
1037 	u32 nic_done = gve_tx_load_event_counter(priv, tx);
1038 
1039 	return nic_done != tx->done;
1040 }
1041