// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>
#include <net/xdp_sock_drv.h>

static inline void gve_tx_put_doorbell(struct gve_priv *priv,
				       struct gve_queue_resources *q_resources,
				       u32 val)
{
	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}

void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid)
{
	u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
	struct gve_tx_ring *tx = &priv->tx[tx_qid];

	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
}

/* gvnic can only transmit from a Registered Segment.
 * We copy skb payloads into the registered segment before writing Tx
 * descriptors and ringing the Tx doorbell.
 *
 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
 * free allocations in the order they were allocated.
 */

static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
			  PAGE_KERNEL);
	if (unlikely(!fifo->base)) {
		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
			  fifo->qpl->id);
		return -ENOMEM;
	}

	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
	atomic_set(&fifo->available, fifo->size);
	fifo->head = 0;
	return 0;
}

static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	WARN(atomic_read(&fifo->available) != fifo->size,
	     "Releasing non-empty fifo");

	vunmap(fifo->base);
}

static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
					  size_t bytes)
{
	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
}
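
/* Worked example (illustrative numbers, not taken from the driver): with a
 * 16384-byte FIFO and fifo->head at 16000, a 500-byte request cannot fit
 * before the end of the FIFO, so the helper above returns
 * 16384 - 16000 = 384 pad bytes; the caller burns that tail so the fragment
 * can start at offset 0. With fifo->head at 1000 it returns 0.
 */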

static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
{
	return (atomic_read(&fifo->available) <= bytes) ? false : true;
}

/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
 * @fifo: FIFO to allocate from
 * @bytes: Allocation size
 * @iov: Scatter-gather elements to fill with allocation fragment base/len
 *
 * Returns number of valid elements in iov[] or negative on error.
 *
 * Allocations from a given FIFO must be externally synchronized but
 * concurrent allocation and freeing are allowed.
 */
static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
			     struct gve_tx_iovec iov[2])
{
	size_t overflow, padding;
	u32 aligned_head;
	int nfrags = 0;

	if (!bytes)
		return 0;

	/* This check happens before we know how much padding is needed to
	 * align to a cacheline boundary for the payload, but that is fine,
	 * because the FIFO head always starts aligned, and the FIFO's
	 * boundaries are aligned, so if there is space for the data, there is
	 * space for the padding to the next alignment.
	 */
	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
	     "Reached %s when there's not enough space in the fifo", __func__);

	nfrags++;

	iov[0].iov_offset = fifo->head;
	iov[0].iov_len = bytes;
	fifo->head += bytes;

	if (fifo->head > fifo->size) {
		/* If the allocation did not fit in the tail fragment of the
		 * FIFO, also use the head fragment.
		 */
		nfrags++;
		overflow = fifo->head - fifo->size;
		iov[0].iov_len -= overflow;
		iov[1].iov_offset = 0;	/* Start of fifo */
		iov[1].iov_len = overflow;

		fifo->head = overflow;
	}

	/* Re-align to a cacheline boundary */
	aligned_head = L1_CACHE_ALIGN(fifo->head);
	padding = aligned_head - fifo->head;
	iov[nfrags - 1].iov_padding = padding;
	atomic_sub(bytes + padding, &fifo->available);
	fifo->head = aligned_head;

	if (fifo->head == fifo->size)
		fifo->head = 0;

	return nfrags;
}
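
/* Worked example for a payload allocation (illustrative numbers, assuming a
 * 64-byte L1 cacheline): with fifo->size = 16384 and fifo->head = 16000,
 * allocating 640 bytes wraps: iov[0] = {offset 16000, len 384},
 * iov[1] = {offset 0, len 256}; the new head (256) is already cacheline
 * aligned, so iov[1].iov_padding = 0 and the function returns nfrags = 2.
 * Headers are never split this way because callers pre-pad with
 * gve_tx_fifo_pad_alloc_one_frag() so the header lands wholly at the start
 * of the FIFO.
 */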

/* gve_tx_free_fifo - Return space to Tx FIFO
 * @fifo: FIFO to return fragments to
 * @bytes: Bytes to free
 */
static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
{
	atomic_add(bytes, &fifo->available);
}

static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info)
{
	size_t space_freed = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
		space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
		info->iov[i].iov_len = 0;
		info->iov[i].iov_padding = 0;
	}
	return space_freed;
}

static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			      u32 to_do)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	u32 xsk_complete = 0;
	u32 idx;
	int i;

	for (i = 0; i < to_do; i++) {
		idx = tx->done & tx->mask;
		info = &tx->info[idx];
		tx->done++;

		if (unlikely(!info->xdp.size))
			continue;

		bytes += info->xdp.size;
		pkts++;
		xsk_complete += info->xdp.is_xsk;

		info->xdp.size = 0;
		if (info->xdp_frame) {
			xdp_return_frame(info->xdp_frame);
			info->xdp_frame = NULL;
		}
		space_freed += gve_tx_clear_buffer_state(info);
	}

	gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	if (xsk_complete > 0 && tx->xsk_pool)
		xsk_tx_completed(tx->xsk_pool, xsk_complete);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	return pkts;
}

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake);

void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
	struct gve_tx_ring *tx = &priv->tx[idx];

	if (!gve_tx_was_added_to_block(priv, idx))
		return;

	gve_remove_napi(priv, ntfy_idx);
	gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
	netdev_tx_reset_queue(tx->netdev_txq);
	gve_tx_remove_from_block(priv, idx);
}

static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
				 struct gve_tx_alloc_rings_cfg *cfg)
{
	struct device *hdev = &priv->pdev->dev;
	int idx = tx->q_num;
	size_t bytes;
	u32 qpl_id;
	u32 slots;

	slots = tx->mask + 1;
	dma_free_coherent(hdev, sizeof(*tx->q_resources),
			  tx->q_resources, tx->q_resources_bus);
	tx->q_resources = NULL;

	if (tx->tx_fifo.qpl) {
		if (tx->tx_fifo.base)
			gve_tx_fifo_release(priv, &tx->tx_fifo);

		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
		gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
		tx->tx_fifo.qpl = NULL;
	}

	bytes = sizeof(*tx->desc) * slots;
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;

	vfree(tx->info);
	tx->info = NULL;

	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}

void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx)
{
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
	struct gve_tx_ring *tx = &priv->tx[idx];

	gve_tx_add_to_block(priv, idx);

	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
	gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}

static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
				 struct gve_tx_alloc_rings_cfg *cfg,
				 struct gve_tx_ring *tx,
				 int idx)
{
	struct device *hdev = &priv->pdev->dev;
	int qpl_page_cnt;
	u32 qpl_id = 0;
	size_t bytes;

	/* Make sure everything is zeroed to start */
	memset(tx, 0, sizeof(*tx));
	spin_lock_init(&tx->clean_lock);
	spin_lock_init(&tx->xdp_lock);
	tx->q_num = idx;

	tx->mask = cfg->ring_size - 1;

	/* alloc metadata */
	tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
	if (!tx->info)
		return -ENOMEM;

	/* alloc tx queue */
	bytes = sizeof(*tx->desc) * cfg->ring_size;
	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->desc)
		goto abort_with_info;

	tx->raw_addressing = cfg->raw_addressing;
	tx->dev = hdev;
	if (!tx->raw_addressing) {
		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
		qpl_page_cnt = priv->tx_pages_per_qpl;

		tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
							    qpl_page_cnt);
		if (!tx->tx_fifo.qpl)
			goto abort_with_desc;

		/* map Tx FIFO */
		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
			goto abort_with_qpl;
	}

	tx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*tx->q_resources),
				   &tx->q_resources_bus,
				   GFP_KERNEL);
	if (!tx->q_resources)
		goto abort_with_fifo;

	return 0;

abort_with_fifo:
	if (!tx->raw_addressing)
		gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
	if (!tx->raw_addressing) {
		gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
		tx->tx_fifo.qpl = NULL;
	}
abort_with_desc:
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;
abort_with_info:
	vfree(tx->info);
	tx->info = NULL;
	return -ENOMEM;
}

int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg)
{
	struct gve_tx_ring *tx = cfg->tx;
	int err = 0;
	int i, j;

	if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
		netif_err(priv, drv, priv->dev,
			  "Cannot alloc more than the max num of Tx rings\n");
		return -EINVAL;
	}

	if (cfg->start_idx == 0) {
		tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
			      GFP_KERNEL);
		if (!tx)
			return -ENOMEM;
	} else if (!tx) {
		netif_err(priv, drv, priv->dev,
			  "Cannot alloc tx rings from a nonzero start idx without tx array\n");
		return -EINVAL;
	}

	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
		err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc tx ring=%d: err=%d\n",
				  i, err);
			goto cleanup;
		}
	}

	cfg->tx = tx;
	return 0;

cleanup:
	for (j = 0; j < i; j++)
		gve_tx_free_ring_gqi(priv, &tx[j], cfg);
	if (cfg->start_idx == 0)
		kvfree(tx);
	return err;
}

void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg)
{
	struct gve_tx_ring *tx = cfg->tx;
	int i;

	if (!tx)
		return;

	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
		gve_tx_free_ring_gqi(priv, &tx[i], cfg);

	if (cfg->start_idx == 0) {
		kvfree(tx);
		cfg->tx = NULL;
	}
}

/* gve_tx_avail - Calculates the number of slots available in the ring
 * @tx: tx ring to check
 *
 * Returns the number of slots available
 *
 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
 **/
static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
{
	return tx->mask + 1 - (tx->req - tx->done);
}
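
/* Worked example (illustrative numbers): with a 256-entry ring
 * (tx->mask = 255), tx->req = 1000 and tx->done = 900, 100 descriptors are
 * still outstanding, so gve_tx_avail() returns 256 - 100 = 156 free slots.
 * The unsigned subtraction stays correct even once req/done wrap around.
 */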

static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
					      struct sk_buff *skb)
{
	int pad_bytes, align_hdr_pad;
	int bytes;
	int hlen;

	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
				 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);

	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
						   hlen);
	/* We need to take into account the header alignment padding. */
	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
	bytes = align_hdr_pad + pad_bytes + skb->len;

	return bytes;
}
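
/* Worked example (illustrative numbers, assuming a 64-byte L1 cacheline):
 * for a 1000-byte non-GSO skb whose hlen works out to 182 bytes,
 * align_hdr_pad = L1_CACHE_ALIGN(182) - 182 = 192 - 182 = 10; with no
 * end-of-FIFO padding needed (pad_bytes = 0), the FIFO must have
 * 10 + 0 + 1000 = 1010 bytes available before this skb is accepted.
 */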

/* The maximum number of descriptors we could need is MAX_SKB_FRAGS + 4:
 * 1 for each skb frag
 * 1 for the skb linear portion
 * 1 for when tcp hdr needs to be in separate descriptor
 * 1 if the payload wraps to the beginning of the FIFO
 * 1 for metadata descriptor
 */
#define MAX_TX_DESC_NEEDED	(MAX_SKB_FRAGS + 4)
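
/* For instance (illustrative): a GSO skb with an L4 hash, two page frags,
 * and linear data extending past the TCP header needs, on the
 * raw-addressing path, 1 packet descriptor, 1 metadata descriptor,
 * 1 descriptor for the remaining linear bytes and 2 frag descriptors,
 * i.e. 5 in total, comfortably below MAX_TX_DESC_NEEDED.
 */
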
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
	if (info->skb) {
		dma_unmap_single(dev, dma_unmap_addr(info, dma),
				 dma_unmap_len(info, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(info, len, 0);
	} else {
		dma_unmap_page(dev, dma_unmap_addr(info, dma),
			       dma_unmap_len(info, len),
			       DMA_TO_DEVICE);
		dma_unmap_len_set(info, len, 0);
	}
}

/* Check if sufficient resources (descriptor ring space, FIFO space) are
 * available to transmit the given number of bytes.
 */
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
	bool can_alloc = true;

	if (!tx->raw_addressing)
		can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);

	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}

static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);

/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
			     struct sk_buff *skb)
{
	int bytes_required = 0;
	u32 nic_done;
	u32 to_do;
	int ret;

	if (!tx->raw_addressing)
		bytes_required = gve_skb_fifo_bytes_required(tx, skb);

	if (likely(gve_can_tx(tx, bytes_required)))
		return 0;

	ret = -EBUSY;
	spin_lock(&tx->clean_lock);
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = nic_done - tx->done;

	/* Only try to clean if there is hope for TX */
	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
		if (to_do > 0) {
			to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
			gve_clean_tx_done(priv, tx, to_do, false);
		}
		if (likely(gve_can_tx(tx, bytes_required)))
			ret = 0;
	}
	if (ret) {
		/* No space, so stop the queue */
		tx->stop_queue++;
		netif_tx_stop_queue(tx->netdev_txq);
	}
	spin_unlock(&tx->clean_lock);

	return ret;
}

static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
				 u16 csum_offset, u8 ip_summed, bool is_gso,
				 int l4_hdr_offset, u32 desc_cnt,
				 u16 hlen, u64 addr, u16 pkt_len)
{
	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
	if (is_gso) {
		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else if (likely(ip_summed == CHECKSUM_PARTIAL)) {
		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else {
		pkt_desc->pkt.type_flags = GVE_TXD_STD;
		pkt_desc->pkt.l4_csum_offset = 0;
		pkt_desc->pkt.l4_hdr_offset = 0;
	}
	pkt_desc->pkt.desc_cnt = desc_cnt;
	pkt_desc->pkt.len = cpu_to_be16(pkt_len);
	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}

static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
				 struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));

	mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
	mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
				   GVE_MTD_PATH_HASH_L4;
	mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
	mtd_desc->mtd.reserved0 = 0;
	mtd_desc->mtd.reserved1 = 0;
}

static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
				 u16 l3_offset, u16 gso_size,
				 bool is_gso_v6, bool is_gso,
				 u16 len, u64 addr)
{
	seg_desc->seg.type_flags = GVE_TXD_SEG;
	if (is_gso) {
		if (is_gso_v6)
			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
		seg_desc->seg.l3_offset = l3_offset >> 1;
		seg_desc->seg.mss = cpu_to_be16(gso_size);
	}
	seg_desc->seg.seg_len = cpu_to_be16(len);
	seg_desc->seg.seg_addr = cpu_to_be64(addr);
}

static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
				    u64 iov_offset, u64 iov_len)
{
	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
	u64 first_page = iov_offset / PAGE_SIZE;
	u64 page;

	for (page = first_page; page <= last_page; page++)
		dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
}
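
/* Worked example (illustrative numbers, assuming 4096-byte pages): a FIFO
 * fragment at iov_offset = 4000 with iov_len = 500 covers bytes 4000..4499,
 * so first_page = 0 and last_page = 1 and both backing pages are synced for
 * the device before the descriptor is handed to the NIC.
 */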

static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
{
	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	int mtd_desc_nr = !!skb->l4_hash;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	int payload_iov = 2;
	int copy_offset;
	u32 next_idx;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want the tcp header alone in the first
	 * segment; otherwise we want the minimum required by the gVNIC spec.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
			min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);

	info->skb = skb;
	/* We don't want to split the header, so if necessary, pad to the end
	 * of the fifo and then put the header at the beginning of the fifo.
	 */
	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
				       &info->iov[0]);
	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
					   &info->iov[payload_iov]);

	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
			     is_gso, l4_hdr_offset,
			     1 + mtd_desc_nr + payload_nfrags, hlen,
			     info->iov[hdr_nfrags - 1].iov_offset, skb->len);

	skb_copy_bits(skb, 0,
		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
		      hlen);
	gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
				info->iov[hdr_nfrags - 1].iov_offset,
				info->iov[hdr_nfrags - 1].iov_len);
	copy_offset = hlen;

	if (mtd_desc_nr) {
		next_idx = (tx->req + 1) & tx->mask;
		gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
	}

	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
		next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
		seg_desc = &tx->desc[next_idx];

		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso,
				     info->iov[i].iov_len,
				     info->iov[i].iov_offset);

		skb_copy_bits(skb, copy_offset,
			      tx->tx_fifo.base + info->iov[i].iov_offset,
			      info->iov[i].iov_len);
		gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
					info->iov[i].iov_offset,
					info->iov[i].iov_len);
		copy_offset += info->iov[i].iov_len;
	}

	return 1 + mtd_desc_nr + payload_nfrags;
}

static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
				  struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	int hlen, num_descriptors, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	int mtd_desc_nr = !!skb->l4_hash;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	u64 addr;
	u32 len;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want only up to the tcp header in the
	 * first segment to efficiently replicate on each segment; otherwise
	 * we want the linear portion of the skb (which will contain the
	 * checksum because skb->csum_start and skb->csum_offset are given
	 * relative to skb->head) in the first segment.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
	len = skb_headlen(skb);

	info->skb = skb;

	addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx->dev, addr))) {
		tx->dma_mapping_error++;
		goto drop;
	}
	dma_unmap_len_set(info, len, len);
	dma_unmap_addr_set(info, dma, addr);

	num_descriptors = 1 + shinfo->nr_frags;
	if (hlen < len)
		num_descriptors++;
	if (mtd_desc_nr)
		num_descriptors++;

	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
			     is_gso, l4_hdr_offset,
			     num_descriptors, hlen, addr, skb->len);

	if (mtd_desc_nr) {
		idx = (idx + 1) & tx->mask;
		mtd_desc = &tx->desc[idx];
		gve_tx_fill_mtd_desc(mtd_desc, skb);
	}

	if (hlen < len) {
		/* For gso the rest of the linear portion of the skb needs to
		 * be in its own descriptor.
		 */
		len -= hlen;
		addr += hlen;
		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso, len, addr);
	}

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		len = skb_frag_size(frag);
		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx->dev, addr))) {
			tx->dma_mapping_error++;
			goto unmap_drop;
		}
		tx->info[idx].skb = NULL;
		dma_unmap_len_set(&tx->info[idx], len, len);
		dma_unmap_addr_set(&tx->info[idx], dma, addr);

		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso, len, addr);
	}

	return num_descriptors;

unmap_drop:
	i += num_descriptors - shinfo->nr_frags;
	while (i--) {
		/* Skip metadata descriptor, if set */
		if (i == 1 && mtd_desc_nr == 1)
			continue;
		idx--;
		gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
	}
drop:
	tx->dropped_pkt++;
	return 0;
}

netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int nsegs;

	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
	     "skb queue index out of range");
	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
		/* We need to ring the txq doorbell -- we have stopped the Tx
		 * queue for want of resources, but prior calls to gve_tx()
		 * may have added descriptors without ringing the doorbell.
		 */

		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		return NETDEV_TX_BUSY;
	}
	if (tx->raw_addressing)
		nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
	else
		nsegs = gve_tx_add_skb_copy(priv, tx, skb);

	/* If the packet is getting sent, we need to update the skb */
	if (nsegs) {
		netdev_tx_sent_queue(tx->netdev_txq, skb->len);
		skb_tx_timestamp(skb);
		tx->req += nsegs;
	} else {
		dev_kfree_skb_any(skb);
	}

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	/* Give packets to NIC. Even if this packet failed to send, the
	 * doorbell might need to be rung because of xmit_more.
	 */
	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
	return NETDEV_TX_OK;
}

static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
			   void *data, int len, void *frame_p, bool is_xsk)
{
	int pad, nfrags, ndescs, iovi, offset;
	struct gve_tx_buffer_state *info;
	u32 reqi = tx->req;

	pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
	if (pad >= GVE_GQ_TX_MIN_PKT_DESC_BYTES)
		pad = 0;
	info = &tx->info[reqi & tx->mask];
	info->xdp_frame = frame_p;
	info->xdp.size = len;
	info->xdp.is_xsk = is_xsk;

	nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
				   &info->iov[0]);
	iovi = pad > 0;
	ndescs = nfrags - iovi;
	offset = 0;

	while (iovi < nfrags) {
		if (!offset)
			gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0,
					     CHECKSUM_NONE, false, 0, ndescs,
					     info->iov[iovi].iov_len,
					     info->iov[iovi].iov_offset, len);
		else
			gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask],
					     0, 0, false, false,
					     info->iov[iovi].iov_len,
					     info->iov[iovi].iov_offset);

		memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset,
		       data + offset, info->iov[iovi].iov_len);
		gve_dma_sync_for_device(&priv->pdev->dev,
					tx->tx_fifo.qpl->page_buses,
					info->iov[iovi].iov_offset,
					info->iov[iovi].iov_len);
		offset += info->iov[iovi].iov_len;
		iovi++;
		reqi++;
	}

	return ndescs;
}

int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int i, err = 0, qid;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	qid = gve_xdp_tx_queue_id(priv,
				  smp_processor_id() % priv->num_xdp_queues);

	tx = &priv->tx[qid];

	spin_lock(&tx->xdp_lock);
	for (i = 0; i < n; i++) {
		err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
				       frames[i]->len, frames[i]);
		if (err)
			break;
	}

	if (flags & XDP_XMIT_FLUSH)
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);

	spin_unlock(&tx->xdp_lock);

	u64_stats_update_begin(&tx->statss);
	tx->xdp_xmit += n;
	tx->xdp_xmit_errors += n - i;
	u64_stats_update_end(&tx->statss);

	return i ? i : err;
}

int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p)
{
	int nsegs;

	if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1))
		return -EBUSY;

	nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
	tx->req += nsegs;

	return 0;
}

#define GVE_TX_START_THRESH	4096

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	struct sk_buff *skb;
	u32 idx;
	int j;

	for (j = 0; j < to_do; j++) {
		idx = tx->done & tx->mask;
		netif_info(priv, tx_done, priv->dev,
			   "[%d] %s: idx=%d (req=%u done=%u)\n",
			   tx->q_num, __func__, idx, tx->req, tx->done);
		info = &tx->info[idx];
		skb = info->skb;

		/* Unmap the buffer */
		if (tx->raw_addressing)
			gve_tx_unmap_buf(tx->dev, info);
		tx->done++;
		/* Mark as free */
		if (skb) {
			info->skb = NULL;
			bytes += skb->len;
			pkts++;
			dev_consume_skb_any(skb);
			if (tx->raw_addressing)
				continue;
			space_freed += gve_tx_clear_buffer_state(info);
		}
	}

	if (!tx->raw_addressing)
		gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);

	/* start the queue if we've stopped it */
#ifndef CONFIG_BQL
	/* Make sure that the doorbells are synced */
	smp_mb();
#endif
	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
		tx->wake_queue++;
		netif_tx_wake_queue(tx->netdev_txq);
	}

	return pkts;
}

u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx)
{
	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);

	return be32_to_cpu(counter);
}

static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
		      int budget)
{
	struct xdp_desc desc;
	int sent = 0, nsegs;
	void *data;

	spin_lock(&tx->xdp_lock);
	while (sent < budget) {
		if (!gve_can_tx(tx, GVE_TX_START_THRESH))
			goto out;

		if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
			tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
			goto out;
		}

		data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
		nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
		tx->req += nsegs;
		sent++;
	}
out:
	if (sent > 0) {
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		xsk_tx_release(tx->xsk_pool);
	}
	spin_unlock(&tx->xdp_lock);
	return sent;
}

bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	u32 nic_done;
	bool repoll;
	u32 to_do;

	/* Find out how much work there is to be done */
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = min_t(u32, (nic_done - tx->done), budget);
	gve_clean_xdp_done(priv, tx, to_do);
	repoll = nic_done != tx->done;

	if (tx->xsk_pool) {
		int sent = gve_xsk_tx(priv, tx, budget);

		u64_stats_update_begin(&tx->statss);
		tx->xdp_xsk_sent += sent;
		u64_stats_update_end(&tx->statss);
		repoll |= (sent == budget);
		if (xsk_uses_need_wakeup(tx->xsk_pool))
			xsk_set_tx_need_wakeup(tx->xsk_pool);
	}

	/* If we still have work we want to repoll */
	return repoll;
}

bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	u32 nic_done;
	u32 to_do;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	/* The TX path may try to clean completed packets in order to xmit
	 * more; to avoid a cleaning conflict, use spin_lock(), which yields
	 * better concurrency between xmit and clean than netif's lock.
	 */
	spin_lock(&tx->clean_lock);
	/* Find out how much work there is to be done */
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = min_t(u32, (nic_done - tx->done), budget);
	gve_clean_tx_done(priv, tx, to_do, true);
	spin_unlock(&tx->clean_lock);
	/* If we still have work we want to repoll */
	return nic_done != tx->done;
}

bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
{
	u32 nic_done = gve_tx_load_event_counter(priv, tx);

	return nic_done != tx->done;
}