// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>
#include <net/xdp_sock_drv.h>

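/* Ring a Tx doorbell by writing @val (big-endian) into the queue's doorbell
 * slot in BAR2.
 */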
static inline void gve_tx_put_doorbell(struct gve_priv *priv,
				       struct gve_queue_resources *q_resources,
				       u32 val)
{
	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}

void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid)
{
	u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
	struct gve_tx_ring *tx = &priv->tx[tx_qid];

	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
}

/* gvnic can only transmit from a Registered Segment.
 * We copy skb payloads into the registered segment before writing Tx
 * descriptors and ringing the Tx doorbell.
 *
 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
 * free allocations in the order they were allocated.
 */

static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
			  PAGE_KERNEL);
	if (unlikely(!fifo->base)) {
		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
			  fifo->qpl->id);
		return -ENOMEM;
	}

	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
	atomic_set(&fifo->available, fifo->size);
	fifo->head = 0;
	return 0;
}

static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	WARN(atomic_read(&fifo->available) != fifo->size,
	     "Releasing non-empty fifo");

	vunmap(fifo->base);
}

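/* Returns the number of pad bytes needed so an allocation of @bytes does not
 * straddle the end of the FIFO: zero if it fits before the wrap point,
 * otherwise the distance from the current head to the end of the FIFO.
 */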
static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
					  size_t bytes)
{
	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
}

static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
{
	return (atomic_read(&fifo->available) <= bytes) ? false : true;
}

/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
 * @fifo: FIFO to allocate from
 * @bytes: Allocation size
 * @iov: Scatter-gather elements to fill with allocation fragment base/len
 *
 * Returns number of valid elements in iov[] or negative on error.
 *
 * Allocations from a given FIFO must be externally synchronized but concurrent
 * allocation and frees are allowed.
 */
static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
			     struct gve_tx_iovec iov[2])
{
	size_t overflow, padding;
	u32 aligned_head;
	int nfrags = 0;

	if (!bytes)
		return 0;

	/* This check happens before we know how much padding is needed to
	 * align to a cacheline boundary for the payload, but that is fine,
	 * because the FIFO head always starts aligned, and the FIFO's boundaries
	 * are aligned, so if there is space for the data, there is space for
	 * the padding to the next alignment.
	 */
	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
	     "Reached %s when there's not enough space in the fifo", __func__);

	nfrags++;

	iov[0].iov_offset = fifo->head;
	iov[0].iov_len = bytes;
	fifo->head += bytes;

	if (fifo->head > fifo->size) {
		/* If the allocation did not fit in the tail fragment of the
		 * FIFO, also use the head fragment.
		 */
		nfrags++;
		overflow = fifo->head - fifo->size;
		iov[0].iov_len -= overflow;
		iov[1].iov_offset = 0;	/* Start of fifo */
		iov[1].iov_len = overflow;

		fifo->head = overflow;
	}

	/* Re-align to a cacheline boundary */
	aligned_head = L1_CACHE_ALIGN(fifo->head);
	padding = aligned_head - fifo->head;
	iov[nfrags - 1].iov_padding = padding;
	atomic_sub(bytes + padding, &fifo->available);
	fifo->head = aligned_head;

	if (fifo->head == fifo->size)
		fifo->head = 0;

	return nfrags;
}

/* gve_tx_free_fifo - Return space to Tx FIFO
 * @fifo: FIFO to return fragments to
 * @bytes: Bytes to free
 */
static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
{
	atomic_add(bytes, &fifo->available);
}

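/* Zero out a completed buffer's FIFO iovecs and return the total number of
 * FIFO bytes (data plus alignment padding) that can be handed back to the
 * FIFO.
 */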
static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info)
{
	size_t space_freed = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
		space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
		info->iov[i].iov_len = 0;
		info->iov[i].iov_padding = 0;
	}
	return space_freed;
}

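/* Reclaim up to @to_do completed XDP descriptors: return any xdp_frames,
 * release FIFO space, credit AF_XDP completions and update stats. Returns
 * the number of packets cleaned.
 */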
static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			      u32 to_do)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	u32 xsk_complete = 0;
	u32 idx;
	int i;

	for (i = 0; i < to_do; i++) {
		idx = tx->done & tx->mask;
		info = &tx->info[idx];
		tx->done++;

		if (unlikely(!info->xdp.size))
			continue;

		bytes += info->xdp.size;
		pkts++;
		xsk_complete += info->xdp.is_xsk;

		info->xdp.size = 0;
		if (info->xdp_frame) {
			xdp_return_frame(info->xdp_frame);
			info->xdp_frame = NULL;
		}
		space_freed += gve_tx_clear_buffer_state(info);
	}

	gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	if (xsk_complete > 0 && tx->xsk_pool)
		xsk_tx_completed(tx->xsk_pool, xsk_complete);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	return pkts;
}

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake);

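/* Quiesce a GQI Tx ring before teardown: remove its NAPI instance, clean any
 * outstanding completions (regular or XDP queue), reset its netdev queue and
 * detach it from its notify block.
 */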
void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
	struct gve_tx_ring *tx = &priv->tx[idx];

	if (!gve_tx_was_added_to_block(priv, idx))
		return;

	gve_remove_napi(priv, ntfy_idx);
	if (tx->q_num < priv->tx_cfg.num_queues)
		gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
	else
		gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
	netdev_tx_reset_queue(tx->netdev_txq);
	gve_tx_remove_from_block(priv, idx);
}

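/* Free all resources owned by a GQI Tx ring: the queue resources block, the
 * FIFO mapping and its queue page list (QPL mode only), the descriptor ring
 * and the per-slot metadata array.
 */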
static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
				 struct gve_tx_alloc_rings_cfg *cfg)
{
	struct device *hdev = &priv->pdev->dev;
	int idx = tx->q_num;
	size_t bytes;
	u32 qpl_id;
	u32 slots;

	slots = tx->mask + 1;
	dma_free_coherent(hdev, sizeof(*tx->q_resources),
			  tx->q_resources, tx->q_resources_bus);
	tx->q_resources = NULL;

	if (tx->tx_fifo.qpl) {
		if (tx->tx_fifo.base)
			gve_tx_fifo_release(priv, &tx->tx_fifo);

		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
		gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
		tx->tx_fifo.qpl = NULL;
	}

	bytes = sizeof(*tx->desc) * slots;
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;

	vfree(tx->info);
	tx->info = NULL;

	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}

void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx)
{
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
	struct gve_tx_ring *tx = &priv->tx[idx];

	gve_tx_add_to_block(priv, idx);

	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
	gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}

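/* Allocate one GQI Tx ring: per-slot metadata, the DMA descriptor ring and,
 * unless raw (DMA) addressing is in use, a queue page list mapped as the
 * copy FIFO, plus the queue resources block shared with the device.
 */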
static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
				 struct gve_tx_alloc_rings_cfg *cfg,
				 struct gve_tx_ring *tx,
				 int idx)
{
	struct device *hdev = &priv->pdev->dev;
	int qpl_page_cnt;
	u32 qpl_id = 0;
	size_t bytes;

	/* Make sure everything is zeroed to start */
	memset(tx, 0, sizeof(*tx));
	spin_lock_init(&tx->clean_lock);
	spin_lock_init(&tx->xdp_lock);
	tx->q_num = idx;

	tx->mask = cfg->ring_size - 1;

	/* alloc metadata */
	tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
	if (!tx->info)
		return -ENOMEM;

	/* alloc tx queue */
	bytes = sizeof(*tx->desc) * cfg->ring_size;
	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->desc)
		goto abort_with_info;

	tx->raw_addressing = cfg->raw_addressing;
	tx->dev = hdev;
	if (!tx->raw_addressing) {
		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
		qpl_page_cnt = priv->tx_pages_per_qpl;

		tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
							    qpl_page_cnt);
		if (!tx->tx_fifo.qpl)
			goto abort_with_desc;

		/* map Tx FIFO */
		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
			goto abort_with_qpl;
	}

	tx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*tx->q_resources),
				   &tx->q_resources_bus,
				   GFP_KERNEL);
	if (!tx->q_resources)
		goto abort_with_fifo;

	return 0;

abort_with_fifo:
	if (!tx->raw_addressing)
		gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
	if (!tx->raw_addressing) {
		gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
		tx->tx_fifo.qpl = NULL;
	}
abort_with_desc:
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;
abort_with_info:
	vfree(tx->info);
	tx->info = NULL;
	return -ENOMEM;
}

int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg)
{
	struct gve_tx_ring *tx = cfg->tx;
	int err = 0;
	int i, j;

	if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
		netif_err(priv, drv, priv->dev,
			  "Cannot alloc more than the max num of Tx rings\n");
		return -EINVAL;
	}

	if (cfg->start_idx == 0) {
		tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
			      GFP_KERNEL);
		if (!tx)
			return -ENOMEM;
	} else if (!tx) {
		netif_err(priv, drv, priv->dev,
			  "Cannot alloc tx rings from a nonzero start idx without tx array\n");
		return -EINVAL;
	}

	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
		err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc tx ring=%d: err=%d\n",
				  i, err);
			goto cleanup;
		}
	}

	cfg->tx = tx;
	return 0;

cleanup:
	for (j = 0; j < i; j++)
		gve_tx_free_ring_gqi(priv, &tx[j], cfg);
	if (cfg->start_idx == 0)
		kvfree(tx);
	return err;
}

void gve_tx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg)
{
	struct gve_tx_ring *tx = cfg->tx;
	int i;

	if (!tx)
		return;

	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
		gve_tx_free_ring_gqi(priv, &tx[i], cfg);

	if (cfg->start_idx == 0) {
		kvfree(tx);
		cfg->tx = NULL;
	}
}

/* gve_tx_avail - Calculates the number of slots available in the ring
 * @tx: tx ring to check
 *
 * Returns the number of slots available
 *
 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
 **/
static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
{
	return tx->mask + 1 - (tx->req - tx->done);
}

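/* Number of FIFO bytes needed to copy this skb: its full length plus the
 * wrap padding required to keep the header contiguous and the cacheline
 * alignment padding that follows the header.
 */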
static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
					      struct sk_buff *skb)
{
	int pad_bytes, align_hdr_pad;
	int bytes;
	int hlen;

	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
				 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);

	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
						   hlen);
	/* We need to take into account the header alignment padding. */
	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
	bytes = align_hdr_pad + pad_bytes + skb->len;

	return bytes;
}

/* The most descriptors we could need is MAX_SKB_FRAGS + 4 :
 * 1 for each skb frag
 * 1 for the skb linear portion
 * 1 for when tcp hdr needs to be in separate descriptor
 * 1 if the payload wraps to the beginning of the FIFO
 * 1 for metadata descriptor
 */
#define MAX_TX_DESC_NEEDED	(MAX_SKB_FRAGS + 4)
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
	if (info->skb) {
		dma_unmap_single(dev, dma_unmap_addr(info, dma),
				 dma_unmap_len(info, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(info, len, 0);
	} else {
		dma_unmap_page(dev, dma_unmap_addr(info, dma),
			       dma_unmap_len(info, len),
			       DMA_TO_DEVICE);
		dma_unmap_len_set(info, len, 0);
	}
}

/* Check if sufficient resources (descriptor ring space, FIFO space) are
 * available to transmit the given number of bytes.
 */
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
	bool can_alloc = true;

	if (!tx->raw_addressing)
		can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);

	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}

static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);

/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
			     struct sk_buff *skb)
{
	int bytes_required = 0;
	u32 nic_done;
	u32 to_do;
	int ret;

	if (!tx->raw_addressing)
		bytes_required = gve_skb_fifo_bytes_required(tx, skb);

	if (likely(gve_can_tx(tx, bytes_required)))
		return 0;

	ret = -EBUSY;
	spin_lock(&tx->clean_lock);
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = nic_done - tx->done;

	/* Only try to clean if there is hope for TX */
	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
		if (to_do > 0) {
			to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
			gve_clean_tx_done(priv, tx, to_do, false);
		}
		if (likely(gve_can_tx(tx, bytes_required)))
			ret = 0;
	}
	if (ret) {
		/* No space, so stop the queue */
		tx->stop_queue++;
		netif_tx_stop_queue(tx->netdev_txq);
	}
	spin_unlock(&tx->clean_lock);

	return ret;
}

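/* Write the first (packet) descriptor: checksum/TSO flags, total descriptor
 * count, total packet length and the address/length of the first segment.
 */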
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
				 u16 csum_offset, u8 ip_summed, bool is_gso,
				 int l4_hdr_offset, u32 desc_cnt,
				 u16 hlen, u64 addr, u16 pkt_len)
{
	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
	if (is_gso) {
		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else if (likely(ip_summed == CHECKSUM_PARTIAL)) {
		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else {
		pkt_desc->pkt.type_flags = GVE_TXD_STD;
		pkt_desc->pkt.l4_csum_offset = 0;
		pkt_desc->pkt.l4_hdr_offset = 0;
	}
	pkt_desc->pkt.desc_cnt = desc_cnt;
	pkt_desc->pkt.len = cpu_to_be16(pkt_len);
	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}

static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
				 struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));

	mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
	mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
				   GVE_MTD_PATH_HASH_L4;
	mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
	mtd_desc->mtd.reserved0 = 0;
	mtd_desc->mtd.reserved1 = 0;
}

static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
				 u16 l3_offset, u16 gso_size,
				 bool is_gso_v6, bool is_gso,
				 u16 len, u64 addr)
{
	seg_desc->seg.type_flags = GVE_TXD_SEG;
	if (is_gso) {
		if (is_gso_v6)
			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
		seg_desc->seg.l3_offset = l3_offset >> 1;
		seg_desc->seg.mss = cpu_to_be16(gso_size);
	}
	seg_desc->seg.seg_len = cpu_to_be16(len);
	seg_desc->seg.seg_addr = cpu_to_be64(addr);
}

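/* Sync every QPL page touched by [iov_offset, iov_offset + iov_len) for
 * device access before the NIC reads the copied bytes.
 */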
static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
				    u64 iov_offset, u64 iov_len)
{
	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
	u64 first_page = iov_offset / PAGE_SIZE;
	u64 page;

	for (page = first_page; page <= last_page; page++)
		dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
}

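/* QPL (copy) transmit path: carve header and payload space out of the Tx
 * FIFO, copy the skb into it and write the packet, optional metadata and
 * segment descriptors. Returns the number of descriptors used.
 */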
static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
{
	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	int mtd_desc_nr = !!skb->l4_hash;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	int payload_iov = 2;
	int copy_offset;
	u32 next_idx;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want the tcp header alone in the first segment
	 * otherwise we want the minimum required by the gVNIC spec.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
			min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);

	info->skb = skb;
	/* We don't want to split the header, so if necessary, pad to the end
	 * of the fifo and then put the header at the beginning of the fifo.
	 */
	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
				       &info->iov[0]);
	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
					   &info->iov[payload_iov]);

	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
			     is_gso, l4_hdr_offset,
			     1 + mtd_desc_nr + payload_nfrags, hlen,
			     info->iov[hdr_nfrags - 1].iov_offset, skb->len);

	skb_copy_bits(skb, 0,
		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
		      hlen);
	gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
				info->iov[hdr_nfrags - 1].iov_offset,
				info->iov[hdr_nfrags - 1].iov_len);
	copy_offset = hlen;

	if (mtd_desc_nr) {
		next_idx = (tx->req + 1) & tx->mask;
		gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
	}

	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
		next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
		seg_desc = &tx->desc[next_idx];

		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso,
				     info->iov[i].iov_len,
				     info->iov[i].iov_offset);

		skb_copy_bits(skb, copy_offset,
			      tx->tx_fifo.base + info->iov[i].iov_offset,
			      info->iov[i].iov_len);
		gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
					info->iov[i].iov_offset,
					info->iov[i].iov_len);
		copy_offset += info->iov[i].iov_len;
	}

	return 1 + mtd_desc_nr + payload_nfrags;
}

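/* Raw-addressing transmit path: DMA-map the skb's linear data and frags and
 * describe them directly to the device, with no FIFO copy. Returns the
 * number of descriptors used, or 0 if the skb was dropped on a mapping
 * error.
 */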
static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
				  struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	int hlen, num_descriptors, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	int mtd_desc_nr = !!skb->l4_hash;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	u64 addr;
	u32 len;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want only up to the tcp header in the first segment
	 * to efficiently replicate on each segment otherwise we want the linear portion
	 * of the skb (which will contain the checksum because skb->csum_start and
	 * skb->csum_offset are given relative to skb->head) in the first segment.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
	len = skb_headlen(skb);

	info->skb = skb;

	addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx->dev, addr))) {
		tx->dma_mapping_error++;
		goto drop;
	}
	dma_unmap_len_set(info, len, len);
	dma_unmap_addr_set(info, dma, addr);

	num_descriptors = 1 + shinfo->nr_frags;
	if (hlen < len)
		num_descriptors++;
	if (mtd_desc_nr)
		num_descriptors++;

	gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
			     is_gso, l4_hdr_offset,
			     num_descriptors, hlen, addr, skb->len);

	if (mtd_desc_nr) {
		idx = (idx + 1) & tx->mask;
		mtd_desc = &tx->desc[idx];
		gve_tx_fill_mtd_desc(mtd_desc, skb);
	}

	if (hlen < len) {
		/* For gso the rest of the linear portion of the skb needs to
		 * be in its own descriptor.
		 */
		len -= hlen;
		addr += hlen;
		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso, len, addr);
	}

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		len = skb_frag_size(frag);
		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx->dev, addr))) {
			tx->dma_mapping_error++;
			goto unmap_drop;
		}
		tx->info[idx].skb = NULL;
		dma_unmap_len_set(&tx->info[idx], len, len);
		dma_unmap_addr_set(&tx->info[idx], dma, addr);

		gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
				     skb_shinfo(skb)->gso_size,
				     skb_is_gso_v6(skb), is_gso, len, addr);
	}

	return num_descriptors;

unmap_drop:
	i += num_descriptors - shinfo->nr_frags;
	while (i--) {
		/* Skip metadata descriptor, if set */
		if (i == 1 && mtd_desc_nr == 1)
			continue;
		idx--;
		gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
	}
drop:
	tx->dropped_pkt++;
	return 0;
}

netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int nsegs;

	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
	     "skb queue index out of range");
	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
		/* We need to ring the txq doorbell -- we have stopped the Tx
		 * queue for want of resources, but prior calls to gve_tx()
		 * may have added descriptors without ringing the doorbell.
		 */

		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		return NETDEV_TX_BUSY;
	}
	if (tx->raw_addressing)
		nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
	else
		nsegs = gve_tx_add_skb_copy(priv, tx, skb);

	/* If the packet is getting sent, we need to update the skb */
	if (nsegs) {
		netdev_tx_sent_queue(tx->netdev_txq, skb->len);
		skb_tx_timestamp(skb);
		tx->req += nsegs;
	} else {
		dev_kfree_skb_any(skb);
	}

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	/* Give packets to NIC. Even if this packet failed to send the doorbell
	 * might need to be rung because of xmit_more.
	 */
	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
	return NETDEV_TX_OK;
}

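/* Copy an XDP/AF_XDP payload into the Tx FIFO and write the matching packet
 * and segment descriptors. Returns the number of descriptors consumed.
 */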
static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
			   void *data, int len, void *frame_p, bool is_xsk)
{
	int pad, nfrags, ndescs, iovi, offset;
	struct gve_tx_buffer_state *info;
	u32 reqi = tx->req;

	pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
	if (pad >= GVE_GQ_TX_MIN_PKT_DESC_BYTES)
		pad = 0;
	info = &tx->info[reqi & tx->mask];
	info->xdp_frame = frame_p;
	info->xdp.size = len;
	info->xdp.is_xsk = is_xsk;

	nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
				   &info->iov[0]);
	iovi = pad > 0;
	ndescs = nfrags - iovi;
	offset = 0;

	while (iovi < nfrags) {
		if (!offset)
			gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0,
					     CHECKSUM_NONE, false, 0, ndescs,
					     info->iov[iovi].iov_len,
					     info->iov[iovi].iov_offset, len);
		else
			gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask],
					     0, 0, false, false,
					     info->iov[iovi].iov_len,
					     info->iov[iovi].iov_offset);

		memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset,
		       data + offset, info->iov[iovi].iov_len);
		gve_dma_sync_for_device(&priv->pdev->dev,
					tx->tx_fifo.qpl->page_buses,
					info->iov[iovi].iov_offset,
					info->iov[iovi].iov_len);
		offset += info->iov[iovi].iov_len;
		iovi++;
		reqi++;
	}

	return ndescs;
}

int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int i, err = 0, qid;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
		return -EINVAL;

	if (!gve_get_napi_enabled(priv))
		return -ENETDOWN;

	qid = gve_xdp_tx_queue_id(priv,
				  smp_processor_id() % priv->num_xdp_queues);

	tx = &priv->tx[qid];

	spin_lock(&tx->xdp_lock);
	for (i = 0; i < n; i++) {
		err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
				       frames[i]->len, frames[i]);
		if (err)
			break;
	}

	if (flags & XDP_XMIT_FLUSH)
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);

	spin_unlock(&tx->xdp_lock);

	u64_stats_update_begin(&tx->statss);
	tx->xdp_xmit += n;
	tx->xdp_xmit_errors += n - i;
	u64_stats_update_end(&tx->statss);

	return i ? i : err;
}

int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p)
{
	int nsegs;

	if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1))
		return -EBUSY;

	nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
	tx->req += nsegs;

	return 0;
}

#define GVE_TX_START_THRESH	4096

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	struct sk_buff *skb;
	u32 idx;
	int j;

	for (j = 0; j < to_do; j++) {
		idx = tx->done & tx->mask;
		netif_info(priv, tx_done, priv->dev,
			   "[%d] %s: idx=%d (req=%u done=%u)\n",
			   tx->q_num, __func__, idx, tx->req, tx->done);
		info = &tx->info[idx];
		skb = info->skb;

		/* Unmap the buffer */
		if (tx->raw_addressing)
			gve_tx_unmap_buf(tx->dev, info);
		tx->done++;
		/* Mark as free */
		if (skb) {
			info->skb = NULL;
			bytes += skb->len;
			pkts++;
			dev_consume_skb_any(skb);
			if (tx->raw_addressing)
				continue;
			space_freed += gve_tx_clear_buffer_state(info);
		}
	}

	if (!tx->raw_addressing)
		gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);

	/* start the queue if we've stopped it */
#ifndef CONFIG_BQL
	/* Make sure that the doorbells are synced */
	smp_mb();
#endif
	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
		tx->wake_queue++;
		netif_tx_wake_queue(tx->netdev_txq);
	}

	return pkts;
}

u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx)
{
	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);

	return be32_to_cpu(counter);
}

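/* Pull up to @budget descriptors from the AF_XDP Tx ring, copy them into the
 * Tx FIFO, then ring the doorbell and release the pool if anything was sent.
 * Returns the number of descriptors sent.
 */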
static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
		      int budget)
{
	struct xdp_desc desc;
	int sent = 0, nsegs;
	void *data;

	spin_lock(&tx->xdp_lock);
	while (sent < budget) {
		if (!gve_can_tx(tx, GVE_TX_START_THRESH))
			goto out;

		if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
			tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
			goto out;
		}

		data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
		nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
		tx->req += nsegs;
		sent++;
	}
out:
	if (sent > 0) {
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		xsk_tx_release(tx->xsk_pool);
	}
	spin_unlock(&tx->xdp_lock);
	return sent;
}

int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
{
	struct gve_rx_ring *rx = rx_block->rx;
	struct gve_priv *priv = rx->gve;
	struct gve_tx_ring *tx;
	int sent = 0;

	tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
	if (tx->xsk_pool) {
		sent = gve_xsk_tx(priv, tx, budget);

		u64_stats_update_begin(&tx->statss);
		tx->xdp_xsk_sent += sent;
		u64_stats_update_end(&tx->statss);
		if (xsk_uses_need_wakeup(tx->xsk_pool))
			xsk_set_tx_need_wakeup(tx->xsk_pool);
	}

	return sent;
}

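/* NAPI poll for an XDP Tx queue: clean up to @budget completed descriptors
 * and report whether more completions remain (i.e. the queue should be
 * repolled).
 */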
bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	u32 nic_done;
	u32 to_do;

	/* Find out how much work there is to be done */
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = min_t(u32, (nic_done - tx->done), budget);
	gve_clean_xdp_done(priv, tx, to_do);

	/* If we still have work we want to repoll */
	return nic_done != tx->done;
}

bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	u32 nic_done;
	u32 to_do;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	/* The TX path may also try to clean completed packets in order to
	 * xmit. To avoid a cleaning conflict, take clean_lock with
	 * spin_lock(); it yields better concurrency between xmit and clean
	 * than netif's lock.
	 */
	spin_lock(&tx->clean_lock);
	/* Find out how much work there is to be done */
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = min_t(u32, (nic_done - tx->done), budget);
	gve_clean_tx_done(priv, tx, to_do, true);
	spin_unlock(&tx->clean_lock);
	/* If we still have work we want to repoll */
	return nic_done != tx->done;
}

bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
{
	u32 nic_done = gve_tx_load_event_counter(priv, tx);

	return nic_done != tx->done;
}