// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
#include "xdp.h"
#include "xsk.h"

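/* idpf_tx_buf_next() reuses the libeth Tx buffer's ->priv area to store a
 * u32, most likely the ring index of the next buffer belonging to the same
 * packet (an assumption based on its name; the users live further down in
 * this file). LIBETH_SQE_CHECK_PRIV() statically asserts that the priv
 * area is large enough to hold that u32.
 */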
#define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv)
LIBETH_SQE_CHECK_PRIV(u32);

/**
 * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
 * @skb: send buffer
 * @max_bufs: maximum scatter gather buffers for single packet
 * @count: number of buffers this packet needs
 *
 * Make sure we don't exceed maximum scatter gather buffers for a single
 * packet.
 * The TSO case has been handled earlier in idpf_features_check().
 *
 * Return: %true if skb exceeds max descriptors per packet, %false otherwise.
 */
static bool idpf_chk_linearize(const struct sk_buff *skb,
			       unsigned int max_bufs,
			       unsigned int count)
{
	if (likely(count <= max_bufs))
		return false;

	if (skb_is_gso(skb))
		return false;

	return true;
}

/**
 * idpf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: TX queue
 */
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);

	adapter->tx_timeout_count++;

	netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n",
		   adapter->tx_timeout_count, txqueue);
	if (!idpf_is_reset_in_prog(adapter)) {
		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
		queue_delayed_work(adapter->vc_event_wq,
				   &adapter->vc_event_task,
				   msecs_to_jiffies(10));
	}
}

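/**
 * idpf_tx_buf_clean - Free any remaining buffers on a Tx queue
 * @txq: queue to clean the buffers on
 */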
static void idpf_tx_buf_clean(struct idpf_tx_queue *txq)
{
	struct libeth_sq_napi_stats ss = { };
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev = txq->dev,
		.bq = &bq,
		.ss = &ss,
	};

	xdp_frame_bulk_init(&bq);

	/* Free all the Tx buffer sk_buffs */
	for (u32 i = 0; i < txq->buf_pool_size; i++)
		libeth_tx_complete_any(&txq->tx_buf[i], &cp);

	xdp_flush_frame_bulk(&bq);
}

/**
 * idpf_tx_buf_rel_all - Free any empty Tx buffers
 * @txq: queue to be cleaned
 */
static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
	/* Buffers already cleared, nothing to do */
	if (!txq->tx_buf)
		return;

	if (idpf_queue_has(XSK, txq))
		idpf_xsksq_clean(txq);
	else
		idpf_tx_buf_clean(txq);

	kfree(txq->tx_buf);
	txq->tx_buf = NULL;
}

/**
 * idpf_tx_desc_rel - Free Tx resources per queue
 * @txq: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
	bool xdp = idpf_queue_has(XDP, txq);

	if (xdp)
		libeth_xdpsq_deinit_timer(txq->timer);

	idpf_tx_buf_rel_all(txq);

	if (!xdp)
		netdev_tx_reset_subqueue(txq->netdev, txq->idx);

	idpf_xsk_clear_queue(txq, VIRTCHNL2_QUEUE_TYPE_TX);

	if (!txq->desc_ring)
		return;

	if (!xdp && txq->refillq)
		kfree(txq->refillq->ring);

	dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
	txq->desc_ring = NULL;
	txq->next_to_use = 0;
	txq->next_to_clean = 0;
}

/**
 * idpf_compl_desc_rel - Free completion resources per queue
 * @complq: completion queue
 *
 * Free all completion software resources.
 */
static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
{
	idpf_xsk_clear_queue(complq, VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);

	if (!complq->desc_ring)
		return;

	dma_free_coherent(complq->netdev->dev.parent, complq->size,
			  complq->desc_ring, complq->dma);
	complq->desc_ring = NULL;
	complq->next_to_use = 0;
	complq->next_to_clean = 0;
}

/**
 * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
 * @rsrc: pointer to queue and vector resources
 *
 * Free all transmit software resources
 */
static void idpf_tx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
	if (!rsrc->txq_grps)
		return;

	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];

		for (unsigned int j = 0; j < txq_grp->num_txq; j++)
			idpf_tx_desc_rel(txq_grp->txqs[j]);

		if (idpf_is_queue_model_split(rsrc->txq_model))
			idpf_compl_desc_rel(txq_grp->complq);
	}
}

/**
 * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
 * @tx_q: queue for which the buffers are allocated
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
	/* Allocate bookkeeping buffers only. Buffers to be supplied to HW
	 * are allocated by the kernel network stack and received as part of
	 * the skb.
	 */
	if (idpf_queue_has(FLOW_SCH_EN, tx_q))
		tx_q->buf_pool_size = U16_MAX;
	else
		tx_q->buf_pool_size = tx_q->desc_count;
	tx_q->tx_buf = kzalloc_objs(*tx_q->tx_buf, tx_q->buf_pool_size);
	if (!tx_q->tx_buf)
		return -ENOMEM;

	return 0;
}

/**
 * idpf_tx_desc_alloc - Allocate the Tx descriptors
 * @vport: vport to allocate resources for
 * @tx_q: the tx ring to set up
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
			      struct idpf_tx_queue *tx_q)
{
	struct device *dev = tx_q->dev;
	struct idpf_sw_queue *refillq;
	int err;

	err = idpf_tx_buf_alloc_all(tx_q);
	if (err)
		goto err_alloc;

	tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);

	/* Allocate descriptors, rounding up to the nearest 4K */
	tx_q->size = ALIGN(tx_q->size, 4096);
	tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
					      GFP_KERNEL);
	if (!tx_q->desc_ring) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_q->size);
		err = -ENOMEM;
		goto err_alloc;
	}

	tx_q->next_to_use = 0;
	tx_q->next_to_clean = 0;
	idpf_queue_set(GEN_CHK, tx_q);

	idpf_xsk_setup_queue(vport, tx_q, VIRTCHNL2_QUEUE_TYPE_TX);

	if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
		return 0;

	refillq = tx_q->refillq;
	refillq->desc_count = tx_q->buf_pool_size;
	refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
				GFP_KERNEL);
	if (!refillq->ring) {
		err = -ENOMEM;
		goto err_alloc;
	}

	for (unsigned int i = 0; i < refillq->desc_count; i++)
		refillq->ring[i] =
			FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) |
			FIELD_PREP(IDPF_RFL_BI_GEN_M,
				   idpf_queue_has(GEN_CHK, refillq));

	/* Go ahead and flip the GEN bit since this counts as filling
	 * up the ring, i.e. we already ring wrapped.
	 */
	idpf_queue_change(GEN_CHK, refillq);

	tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;

	return 0;

err_alloc:
	idpf_tx_desc_rel(tx_q);

	return err;
}

/**
 * idpf_compl_desc_alloc - allocate completion descriptors
 * @vport: virtual port private structure
 * @complq: completion queue to set up
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
				 struct idpf_compl_queue *complq)
{
	u32 desc_size;

	desc_size = idpf_queue_has(FLOW_SCH_EN, complq) ?
		    sizeof(*complq->comp) : sizeof(*complq->comp_4b);
	complq->size = array_size(complq->desc_count, desc_size);

	complq->desc_ring = dma_alloc_coherent(complq->netdev->dev.parent,
					       complq->size, &complq->dma,
					       GFP_KERNEL);
	if (!complq->desc_ring)
		return -ENOMEM;

	complq->next_to_use = 0;
	complq->next_to_clean = 0;
	idpf_queue_set(GEN_CHK, complq);

	idpf_xsk_setup_queue(vport, complq,
			     VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);

	return 0;
}

/**
 * idpf_tx_desc_alloc_all - allocate all queues Tx resources
 * @vport: virtual port private structure
 * @rsrc: pointer to queue and vector resources
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_tx_desc_alloc_all(struct idpf_vport *vport,
				  struct idpf_q_vec_rsrc *rsrc)
{
	int err = 0;

	/* Set up the Tx queues. In the single queue model, the Tx queue
	 * and the completion queue will be the same.
	 */
	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
		for (unsigned int j = 0; j < rsrc->txq_grps[i].num_txq; j++) {
			struct idpf_tx_queue *txq = rsrc->txq_grps[i].txqs[j];

			err = idpf_tx_desc_alloc(vport, txq);
			if (err) {
				pci_err(vport->adapter->pdev,
					"Allocation for Tx Queue %u failed\n",
					i);
				goto err_out;
			}
		}

		if (!idpf_is_queue_model_split(rsrc->txq_model))
			continue;

		/* Setup completion queues */
		err = idpf_compl_desc_alloc(vport, rsrc->txq_grps[i].complq);
		if (err) {
			pci_err(vport->adapter->pdev,
				"Allocation for Tx Completion Queue %u failed\n",
				i);
			goto err_out;
		}
	}

err_out:
	if (err)
		idpf_tx_desc_rel_all(rsrc);

	return err;
}

/**
 * idpf_rx_page_rel - Release an rx buffer page
 * @rx_buf: the buffer to free
 */
static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
{
	if (unlikely(!rx_buf->netmem))
		return;

	libeth_rx_recycle_slow(rx_buf->netmem);

	rx_buf->netmem = 0;
	rx_buf->offset = 0;
}

/**
 * idpf_rx_hdr_buf_rel_all - Release header buffer memory
 * @bufq: queue to use
 */
static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
{
	struct libeth_fq fq = {
		.fqes = bufq->hdr_buf,
		.pp = bufq->hdr_pp,
	};

	for (u32 i = 0; i < bufq->desc_count; i++)
		idpf_rx_page_rel(&bufq->hdr_buf[i]);

	libeth_rx_fq_destroy(&fq);
	bufq->hdr_buf = NULL;
	bufq->hdr_pp = NULL;
}

/**
 * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
 * @bufq: queue to be cleaned
 */
static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
{
	struct libeth_fq fq = {
		.fqes = bufq->buf,
		.pp = bufq->pp,
	};

	/* queue already cleared, nothing to do */
	if (!bufq->buf)
		return;

	if (idpf_queue_has(XSK, bufq)) {
		idpf_xskfq_rel(bufq);
		return;
	}

	/* Free all the bufs allocated and given to hw on Rx queue */
	for (u32 i = 0; i < bufq->desc_count; i++)
		idpf_rx_page_rel(&bufq->buf[i]);

	if (idpf_queue_has(HSPLIT_EN, bufq))
		idpf_rx_hdr_buf_rel_all(bufq);

	libeth_rx_fq_destroy(&fq);
	bufq->buf = NULL;
	bufq->pp = NULL;
}

/**
 * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
 * @rxq: queue to be cleaned
 */
static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
{
	struct libeth_fq fq = {
		.fqes = rxq->rx_buf,
		.pp = rxq->pp,
	};

	if (!rxq->rx_buf)
		return;

	for (u32 i = 0; i < rxq->desc_count; i++)
		idpf_rx_page_rel(&rxq->rx_buf[i]);

	libeth_rx_fq_destroy(&fq);
	rxq->rx_buf = NULL;
	rxq->pp = NULL;
}

/**
 * idpf_rx_desc_rel - Free the resources of a specific Rx queue
 * @rxq: queue to clean the resources from
 * @dev: device to free DMA memory
 * @model: single or split queue model
 */
static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
			     u32 model)
{
	if (!rxq)
		return;

	if (!idpf_queue_has(XSK, rxq))
		libeth_xdp_return_stash(&rxq->xdp);

	if (!idpf_is_queue_model_split(model))
		idpf_rx_buf_rel_all(rxq);

	idpf_xsk_clear_queue(rxq, VIRTCHNL2_QUEUE_TYPE_RX);

	rxq->next_to_alloc = 0;
	rxq->next_to_clean = 0;
	rxq->next_to_use = 0;
	if (!rxq->desc_ring)
		return;

	dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
	rxq->desc_ring = NULL;
}

/**
 * idpf_rx_desc_rel_bufq - free buffer queue resources
 * @bufq: buffer queue to clean the resources from
 * @dev: device to free DMA memory
 */
static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
				  struct device *dev)
{
	if (!bufq)
		return;

	idpf_rx_buf_rel_bufq(bufq);
	idpf_xsk_clear_queue(bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);

	bufq->next_to_alloc = 0;
	bufq->next_to_clean = 0;
	bufq->next_to_use = 0;

	if (!bufq->split_buf)
		return;

	dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
	bufq->split_buf = NULL;
}

/**
 * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
 * @rsrc: pointer to queue and vector resources
 *
 * Free all rx queues resources
 */
static void idpf_rx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
	struct device *dev = rsrc->dev;
	struct idpf_rxq_group *rx_qgrp;
	u16 num_rxq;

	if (!rsrc->rxq_grps)
		return;

	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
		rx_qgrp = &rsrc->rxq_grps[i];

		if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
			for (unsigned int j = 0; j < rx_qgrp->singleq.num_rxq; j++)
				idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
						 VIRTCHNL2_QUEUE_MODEL_SINGLE);
			continue;
		}

		num_rxq = rx_qgrp->splitq.num_rxq_sets;
		for (unsigned int j = 0; j < num_rxq; j++)
			idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
					 dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);

		if (!rx_qgrp->splitq.bufq_sets)
			continue;

		for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
			struct idpf_bufq_set *bufq_set =
				&rx_qgrp->splitq.bufq_sets[j];

			idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
		}
	}
}

/**
 * idpf_rx_buf_hw_update - Store the new tail and head values
 * @bufq: queue to bump
 * @val: new head index
 */
static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
{
	bufq->next_to_use = val;

	if (unlikely(!bufq->tail))
		return;

	/* writel has an implicit memory barrier */
	writel(val, bufq->tail);
}

/**
 * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
 * @bufq: ring to use
 *
 * Return: 0 on success, negative on failure.
 */
static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
{
	struct libeth_fq fq = {
		.count = bufq->desc_count,
		.type = LIBETH_FQE_HDR,
		.xdp = idpf_xdp_enabled(bufq->q_vector->vport),
		.nid = idpf_q_vector_to_mem(bufq->q_vector),
	};
	int ret;

	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
	if (ret)
		return ret;

	bufq->hdr_pp = fq.pp;
	bufq->hdr_buf = fq.fqes;
	bufq->hdr_truesize = fq.truesize;
	bufq->rx_hbuf_size = fq.buf_len;

	return 0;
}

/**
 * idpf_post_buf_refill - Post buffer id to refill queue
 * @refillq: refill queue to post to
 * @buf_id: buffer id to post
 */
static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
	u32 nta = refillq->next_to_use;

	/* store the buffer ID and the SW maintained GEN bit to the refillq */
	refillq->ring[nta] =
		FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
		FIELD_PREP(IDPF_RFL_BI_GEN_M,
			   idpf_queue_has(GEN_CHK, refillq));

	if (unlikely(++nta == refillq->desc_count)) {
		nta = 0;
		idpf_queue_change(GEN_CHK, refillq);
	}

	refillq->next_to_use = nta;
}
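
/* Illustrative note on the GEN bit scheme above: the refill queue works
 * without a shared read index. Each entry is stamped with the producer's
 * current GEN value, which flips on every ring wrap, e.g. for a 4-entry
 * ring that starts with GEN set:
 *
 *	post(a): ring[0] = a | GEN=1
 *	post(b): ring[1] = b | GEN=1
 *	post(c): ring[2] = c | GEN=1
 *	post(d): ring[3] = d | GEN=1, wrap -> GEN flips to 0
 *	post(e): ring[0] = e | GEN=0
 *
 * A consumer tracking its own copy of the GEN flag can thus tell freshly
 * posted entries apart from stale ones left over from the previous pass.
 */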

/**
 * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
 * @bufq: buffer queue to post to
 * @buf_id: buffer id to post
 *
 * Return: %false if buffer could not be allocated, %true otherwise.
 */
static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
{
	struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
	struct libeth_fq_fp fq = {
		.count = bufq->desc_count,
	};
	u16 nta = bufq->next_to_alloc;
	dma_addr_t addr;

	splitq_rx_desc = &bufq->split_buf[nta];

	if (idpf_queue_has(HSPLIT_EN, bufq)) {
		fq.pp = bufq->hdr_pp;
		fq.fqes = bufq->hdr_buf;
		fq.truesize = bufq->hdr_truesize;

		addr = libeth_rx_alloc(&fq, buf_id);
		if (addr == DMA_MAPPING_ERROR)
			return false;

		splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
	}

	fq.pp = bufq->pp;
	fq.fqes = bufq->buf;
	fq.truesize = bufq->truesize;

	addr = libeth_rx_alloc(&fq, buf_id);
	if (addr == DMA_MAPPING_ERROR)
		return false;

	splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
	splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);

	nta++;
	if (unlikely(nta == bufq->desc_count))
		nta = 0;
	bufq->next_to_alloc = nta;

	return true;
}

/**
 * idpf_rx_post_init_bufs - Post initial buffers to bufq
 * @bufq: buffer queue to post working set to
 * @working_set: number of buffers to put in working set
 *
 * Return: %true if @working_set bufs were posted successfully, %false otherwise.
 */
static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
				   u16 working_set)
{
	int i;

	for (i = 0; i < working_set; i++) {
		if (!idpf_rx_post_buf_desc(bufq, i))
			return false;
	}

	idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
					       IDPF_RX_BUF_STRIDE));

	return true;
}

/**
 * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
 * @rxq: queue for which the buffers are allocated
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
{
	if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
		goto err;

	return 0;

err:
	idpf_rx_buf_rel_all(rxq);

	return -ENOMEM;
}

/**
 * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
 * @rxq: buffer queue to create page pool for
 *
 * Return: 0 on success, -errno on failure.
 */
static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
{
	struct libeth_fq fq = {
		.count = rxq->desc_count,
		.type = LIBETH_FQE_MTU,
		.buf_len = IDPF_RX_MAX_BUF_SZ,
		.nid = idpf_q_vector_to_mem(rxq->q_vector),
	};
	int ret;

	ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
	if (ret)
		return ret;

	rxq->pp = fq.pp;
	rxq->rx_buf = fq.fqes;
	rxq->truesize = fq.truesize;
	rxq->rx_buf_size = fq.buf_len;

	return idpf_rx_buf_alloc_singleq(rxq);
}

/**
 * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
 * @rxbufq: queue for which the buffers are allocated
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
{
	int err = 0;

	if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
		err = idpf_rx_hdr_buf_alloc_all(rxbufq);
		if (err)
			goto rx_buf_alloc_all_out;
	}

	/* Allocate buffers to be given to HW. */
	if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
		err = -ENOMEM;

rx_buf_alloc_all_out:
	if (err)
		idpf_rx_buf_rel_bufq(rxbufq);

	return err;
}

/**
 * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
 * @bufq: buffer queue to create page pool for
 * @type: type of Rx buffers to allocate
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
			     enum libeth_fqe_type type)
{
	struct libeth_fq fq = {
		.truesize = bufq->truesize,
		.count = bufq->desc_count,
		.type = type,
		.buf_len = IDPF_RX_MAX_BUF_SZ,
		.hsplit = idpf_queue_has(HSPLIT_EN, bufq),
		.xdp = idpf_xdp_enabled(bufq->q_vector->vport),
		.nid = idpf_q_vector_to_mem(bufq->q_vector),
	};
	int ret;

	if (idpf_queue_has(XSK, bufq))
		return idpf_xskfq_init(bufq);

	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
	if (ret)
		return ret;

	bufq->pp = fq.pp;
	bufq->buf = fq.fqes;
	bufq->truesize = fq.truesize;
	bufq->rx_buf_size = fq.buf_len;

	return idpf_rx_buf_alloc_all(bufq);
}

/**
 * idpf_rx_bufs_init_all - Initialize all RX bufs
 * @vport: pointer to vport struct
 * @rsrc: pointer to queue and vector resources
 *
 * Return: 0 on success, negative on failure
 */
int idpf_rx_bufs_init_all(struct idpf_vport *vport,
			  struct idpf_q_vec_rsrc *rsrc)
{
	bool split = idpf_is_queue_model_split(rsrc->rxq_model);
	int err;

	idpf_xdp_copy_prog_to_rqs(rsrc, vport->xdp_prog);

	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
		struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
		u32 truesize = 0;

		/* Allocate bufs for the rxq itself in singleq */
		if (!split) {
			int num_rxq = rx_qgrp->singleq.num_rxq;

			for (unsigned int j = 0; j < num_rxq; j++) {
				struct idpf_rx_queue *q;

				q = rx_qgrp->singleq.rxqs[j];
				err = idpf_rx_bufs_init_singleq(q);
				if (err)
					return err;
			}

			continue;
		}

		/* Otherwise, allocate bufs for the buffer queues */
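		/* The first buffer queue gets full-size (MTU) buffers; each
		 * subsequent one gets "short" buffers at half the previous
		 * queue's truesize, seeded by the truesize value written
		 * back by idpf_rx_bufs_init() below.
		 */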
		for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
			enum libeth_fqe_type type;
			struct idpf_buf_queue *q;

			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
			q->truesize = truesize;

			type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;

			err = idpf_rx_bufs_init(q, type);
			if (err)
				return err;

			truesize = q->truesize >> 1;
		}
	}

	return 0;
}

/**
 * idpf_rx_desc_alloc - Allocate queue Rx resources
 * @vport: vport to allocate resources for
 * @rxq: Rx queue for which the resources are set up
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
			      struct idpf_rx_queue *rxq)
{
	struct device *dev = &vport->adapter->pdev->dev;

	rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);

	/* Allocate descriptors, rounding up to the nearest 4K */
	rxq->size = ALIGN(rxq->size, 4096);
	rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
					     &rxq->dma, GFP_KERNEL);
	if (!rxq->desc_ring) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rxq->size);
		return -ENOMEM;
	}

	rxq->next_to_alloc = 0;
	rxq->next_to_clean = 0;
	rxq->next_to_use = 0;
	idpf_queue_set(GEN_CHK, rxq);

	idpf_xsk_setup_queue(vport, rxq, VIRTCHNL2_QUEUE_TYPE_RX);

	return 0;
}

/**
 * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
 * @vport: vport to allocate resources for
 * @bufq: buffer queue for which the resources are set up
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
				struct idpf_buf_queue *bufq)
{
	struct device *dev = &vport->adapter->pdev->dev;

	bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));

	bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
					     GFP_KERNEL);
	if (!bufq->split_buf)
		return -ENOMEM;

	bufq->next_to_alloc = 0;
	bufq->next_to_clean = 0;
	bufq->next_to_use = 0;
	idpf_queue_set(GEN_CHK, bufq);

	idpf_xsk_setup_queue(vport, bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);

	return 0;
}

/**
 * idpf_rx_desc_alloc_all - allocate all RX queues resources
 * @vport: virtual port structure
 * @rsrc: pointer to queue and vector resources
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_rx_desc_alloc_all(struct idpf_vport *vport,
				  struct idpf_q_vec_rsrc *rsrc)
{
	struct idpf_rxq_group *rx_qgrp;
	u16 num_rxq;
	int err;

	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
		rx_qgrp = &rsrc->rxq_grps[i];
		if (idpf_is_queue_model_split(rsrc->rxq_model))
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (unsigned int j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;

			if (idpf_is_queue_model_split(rsrc->rxq_model))
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];

			err = idpf_rx_desc_alloc(vport, q);
			if (err) {
				pci_err(vport->adapter->pdev,
					"Memory allocation for Rx queue %u from queue group %u failed\n",
					j, i);
				goto err_out;
			}
		}

		if (!idpf_is_queue_model_split(rsrc->rxq_model))
			continue;

		for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
			struct idpf_buf_queue *q;

			q = &rx_qgrp->splitq.bufq_sets[j].bufq;

			err = idpf_bufq_desc_alloc(vport, q);
			if (err) {
				pci_err(vport->adapter->pdev,
					"Memory allocation for Rx Buffer Queue %u from queue group %u failed\n",
					j, i);
				goto err_out;
			}
		}
	}

	return 0;

err_out:
	idpf_rx_desc_rel_all(rsrc);

	return err;
}

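/**
 * idpf_init_queue_set - allocate and initialize a set of queues
 * @vport: vport the queue set belongs to
 * @qs: set of queues to initialize
 *
 * Allocate descriptor rings and Rx buffers for each queue in the set,
 * dispatching on the queue type.
 *
 * Return: 0 on success, -errno on failure.
 */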
static int idpf_init_queue_set(const struct idpf_vport *vport,
			       const struct idpf_queue_set *qs)
{
	bool splitq;
	int err;

	splitq = idpf_is_queue_model_split(qs->qv_rsrc->rxq_model);

	for (u32 i = 0; i < qs->num; i++) {
		const struct idpf_queue_ptr *q = &qs->qs[i];
		struct idpf_buf_queue *bufq;

		switch (q->type) {
		case VIRTCHNL2_QUEUE_TYPE_RX:
			err = idpf_rx_desc_alloc(vport, q->rxq);
			if (err)
				break;

			err = idpf_xdp_rxq_info_init(q->rxq);
			if (err)
				break;

			if (!splitq)
				err = idpf_rx_bufs_init_singleq(q->rxq);

			break;
		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
			bufq = q->bufq;

			err = idpf_bufq_desc_alloc(vport, bufq);
			if (err)
				break;

			for (u32 j = 0; j < bufq->q_vector->num_bufq; j++) {
				struct idpf_buf_queue * const *bufqs;
				enum libeth_fqe_type type;
				u32 ts;

				bufqs = bufq->q_vector->bufq;
				if (bufqs[j] != bufq)
					continue;

				if (j) {
					type = LIBETH_FQE_SHORT;
					ts = bufqs[j - 1]->truesize >> 1;
				} else {
					type = LIBETH_FQE_MTU;
					ts = 0;
				}

				bufq->truesize = ts;

				err = idpf_rx_bufs_init(bufq, type);
				break;
			}

			break;
		case VIRTCHNL2_QUEUE_TYPE_TX:
			err = idpf_tx_desc_alloc(vport, q->txq);
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
			err = idpf_compl_desc_alloc(vport, q->complq);
			break;
		default:
			continue;
		}

		if (err)
			return err;
	}

	return 0;
}

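/**
 * idpf_clean_queue_set - release the resources of a set of queues
 * @qs: set of queues to clean
 */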
static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
{
	const struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc;

	for (u32 i = 0; i < qs->num; i++) {
		const struct idpf_queue_ptr *q = &qs->qs[i];

		switch (q->type) {
		case VIRTCHNL2_QUEUE_TYPE_RX:
			idpf_xdp_rxq_info_deinit(q->rxq, rsrc->rxq_model);
			idpf_rx_desc_rel(q->rxq, rsrc->dev, rsrc->rxq_model);
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
			idpf_rx_desc_rel_bufq(q->bufq, rsrc->dev);
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX:
			idpf_tx_desc_rel(q->txq);

			if (idpf_queue_has(XDP, q->txq)) {
				q->txq->pending = 0;
				q->txq->xdp_tx = 0;
			} else {
				q->txq->txq_grp->num_completions_pending = 0;
			}

			writel(q->txq->next_to_use, q->txq->tail);
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
			idpf_compl_desc_rel(q->complq);
			q->complq->num_completions = 0;
			break;
		default:
			break;
		}
	}
}

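/**
 * idpf_qvec_ena_irq - write the current ITR values and enable the interrupt
 * @qv: queue vector to enable
 */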
static void idpf_qvec_ena_irq(struct idpf_q_vector *qv)
{
	if (qv->num_txq) {
		u32 itr;

		if (IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode))
			itr = qv->vport->tx_itr_profile[qv->tx_dim.profile_ix];
		else
			itr = qv->tx_itr_value;

		idpf_vport_intr_write_itr(qv, itr, true);
	}

	if (qv->num_rxq) {
		u32 itr;

		if (IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode))
			itr = qv->vport->rx_itr_profile[qv->rx_dim.profile_ix];
		else
			itr = qv->rx_itr_value;

		idpf_vport_intr_write_itr(qv, itr, false);
	}

	if (qv->num_txq || qv->num_rxq)
		idpf_vport_intr_update_itr_ena_irq(qv);
}

/**
 * idpf_vector_to_queue_set - create a queue set associated with the given
 *			      queue vector
 * @qv: queue vector corresponding to the queue pair
 *
 * Returns a pointer to a dynamically allocated array of pointers to all
 * queues associated with a given queue vector (@qv).
 * Please note that the caller is responsible for freeing the memory
 * allocated by this function using kfree().
 *
 * Return: &idpf_queue_set on success, %NULL in case of error.
 */
static struct idpf_queue_set *
idpf_vector_to_queue_set(struct idpf_q_vector *qv)
{
	u32 xdp_txq_offset = qv->vport->dflt_qv_rsrc.xdp_txq_offset;
	bool xdp = xdp_txq_offset && !qv->num_xsksq;
	struct idpf_vport *vport = qv->vport;
	struct idpf_queue_set *qs;
	u32 num;

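	/* Each XDP/XSk Tx queue is paired with its own completion queue,
	 * hence the two extra entries per queue in both branches below.
	 */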
	num = qv->num_rxq + qv->num_bufq + qv->num_txq + qv->num_complq;
	num += xdp ? qv->num_rxq * 2 : qv->num_xsksq * 2;
	if (!num)
		return NULL;

	qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
				  vport->vport_id, num);
	if (!qs)
		return NULL;

	num = 0;

	for (u32 i = 0; i < qv->num_bufq; i++) {
		qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
		qs->qs[num++].bufq = qv->bufq[i];
	}

	for (u32 i = 0; i < qv->num_rxq; i++) {
		qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX;
		qs->qs[num++].rxq = qv->rx[i];
	}

	for (u32 i = 0; i < qv->num_txq; i++) {
		qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
		qs->qs[num++].txq = qv->tx[i];
	}

	for (u32 i = 0; i < qv->num_complq; i++) {
		qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
		qs->qs[num++].complq = qv->complq[i];
	}

	if (!xdp_txq_offset)
		goto finalize;

	if (xdp) {
		for (u32 i = 0; i < qv->num_rxq; i++) {
			u32 idx = xdp_txq_offset + qv->rx[i]->idx;

			qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
			qs->qs[num++].txq = vport->txqs[idx];

			qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
			qs->qs[num++].complq = vport->txqs[idx]->complq;
		}
	} else {
		for (u32 i = 0; i < qv->num_xsksq; i++) {
			qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
			qs->qs[num++].txq = qv->xsksq[i];

			qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
			qs->qs[num++].complq = qv->xsksq[i]->complq;
		}
	}

finalize:
	if (num != qs->num) {
		kfree(qs);
		return NULL;
	}

	return qs;
}

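/**
 * idpf_qp_enable - enable the queues associated with a queue pair
 * @vport: vport the queue pair belongs to
 * @qs: set of queues forming the pair
 * @qid: index of the queue pair
 *
 * Return: 0 on success, -errno on failure.
 */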
static int idpf_qp_enable(const struct idpf_vport *vport,
			  const struct idpf_queue_set *qs, u32 qid)
{
	const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	struct idpf_q_vector *q_vector;
	int err;

	q_vector = idpf_find_rxq_vec(vport, qid);

	err = idpf_init_queue_set(vport, qs);
	if (err) {
		netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
			   qid, ERR_PTR(err));
		return err;
	}

	if (!rsrc->xdp_txq_offset)
		goto config;

	q_vector->xsksq = kzalloc_objs(*q_vector->xsksq,
				       DIV_ROUND_UP(rsrc->num_rxq_grp,
						    rsrc->num_q_vectors));
	if (!q_vector->xsksq)
		return -ENOMEM;

	for (u32 i = 0; i < qs->num; i++) {
		const struct idpf_queue_ptr *q = &qs->qs[i];

		if (q->type != VIRTCHNL2_QUEUE_TYPE_TX)
			continue;

		if (!idpf_queue_has(XSK, q->txq))
			continue;

		idpf_xsk_init_wakeup(q_vector);

		q->txq->q_vector = q_vector;
		q_vector->xsksq[q_vector->num_xsksq++] = q->txq;
	}

config:
	err = idpf_send_config_queue_set_msg(qs);
	if (err) {
		netdev_err(vport->netdev, "Could not configure queues in pair %u: %pe\n",
			   qid, ERR_PTR(err));
		return err;
	}

	err = idpf_send_enable_queue_set_msg(qs);
	if (err) {
		netdev_err(vport->netdev, "Could not enable queues in pair %u: %pe\n",
			   qid, ERR_PTR(err));
		return err;
	}

	napi_enable(&q_vector->napi);
	idpf_qvec_ena_irq(q_vector);

	netif_start_subqueue(vport->netdev, qid);

	return 0;
}

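/**
 * idpf_qp_disable - disable the queues associated with a queue pair
 * @vport: vport the queue pair belongs to
 * @qs: set of queues forming the pair
 * @qid: index of the queue pair
 *
 * Return: 0 on success, -errno on failure.
 */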
static int idpf_qp_disable(const struct idpf_vport *vport,
			   const struct idpf_queue_set *qs, u32 qid)
{
	struct idpf_q_vector *q_vector;
	int err;

	q_vector = idpf_find_rxq_vec(vport, qid);
	netif_stop_subqueue(vport->netdev, qid);

	writel(0, q_vector->intr_reg.dyn_ctl);
	napi_disable(&q_vector->napi);

	err = idpf_send_disable_queue_set_msg(qs);
	if (err) {
		netdev_err(vport->netdev, "Could not disable queues in pair %u: %pe\n",
			   qid, ERR_PTR(err));
		return err;
	}

	idpf_clean_queue_set(qs);

	kfree(q_vector->xsksq);
	q_vector->num_xsksq = 0;

	return 0;
}

/**
 * idpf_qp_switch - enable or disable queues associated with queue pair
 * @vport: vport to switch the pair for
 * @qid: index of the queue pair to switch
 * @en: whether to enable or disable the pair
 *
 * Return: 0 on success, -errno on failure.
 */
int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
{
	struct idpf_q_vector *q_vector = idpf_find_rxq_vec(vport, qid);
	struct idpf_queue_set *qs __free(kfree) = NULL;

	if (idpf_find_txq_vec(vport, qid) != q_vector)
		return -EINVAL;

	qs = idpf_vector_to_queue_set(q_vector);
	if (!qs)
		return -ENOMEM;

	return en ? idpf_qp_enable(vport, qs, qid) :
		    idpf_qp_disable(vport, qs, qid);
}

/**
 * idpf_txq_group_rel - Release all resources for txq groups
 * @rsrc: pointer to queue and vector resources
 */
static void idpf_txq_group_rel(struct idpf_q_vec_rsrc *rsrc)
{
	bool split;

	if (!rsrc->txq_grps)
		return;

	split = idpf_is_queue_model_split(rsrc->txq_model);

	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
		struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];

		for (unsigned int j = 0; j < txq_grp->num_txq; j++) {
			if (!txq_grp->txqs[j])
				continue;

			if (idpf_queue_has(FLOW_SCH_EN, txq_grp->txqs[j])) {
				kfree(txq_grp->txqs[j]->refillq);
				txq_grp->txqs[j]->refillq = NULL;
			}

			kfree(txq_grp->txqs[j]);
			txq_grp->txqs[j] = NULL;
		}

		if (!split)
			continue;

		kfree(txq_grp->complq);
		txq_grp->complq = NULL;
	}
	kfree(rsrc->txq_grps);
	rsrc->txq_grps = NULL;
}

/**
 * idpf_rxq_sw_queue_rel - Release software queue resources
 * @rx_qgrp: rx queue group with software queues
 */
static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
{
	if (!rx_qgrp->splitq.bufq_sets)
		return;

	for (unsigned int i = 0; i < rx_qgrp->splitq.num_bufq_sets; i++) {
		struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];

		for (unsigned int j = 0; j < bufq_set->num_refillqs; j++) {
			kfree(bufq_set->refillqs[j].ring);
			bufq_set->refillqs[j].ring = NULL;
		}
		kfree(bufq_set->refillqs);
		bufq_set->refillqs = NULL;
	}
}

/**
 * idpf_rxq_group_rel - Release all resources for rxq groups
 * @rsrc: pointer to queue and vector resources
 */
static void idpf_rxq_group_rel(struct idpf_q_vec_rsrc *rsrc)
{
	if (!rsrc->rxq_grps)
		return;

	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
		struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
		u16 num_rxq;

		if (idpf_is_queue_model_split(rsrc->rxq_model)) {
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
			for (unsigned int j = 0; j < num_rxq; j++) {
				kfree(rx_qgrp->splitq.rxq_sets[j]);
				rx_qgrp->splitq.rxq_sets[j] = NULL;
			}

			idpf_rxq_sw_queue_rel(rx_qgrp);
			kfree(rx_qgrp->splitq.bufq_sets);
			rx_qgrp->splitq.bufq_sets = NULL;
		} else {
			num_rxq = rx_qgrp->singleq.num_rxq;
			for (unsigned int j = 0; j < num_rxq; j++) {
				kfree(rx_qgrp->singleq.rxqs[j]);
				rx_qgrp->singleq.rxqs[j] = NULL;
			}
		}
	}
	kfree(rsrc->rxq_grps);
	rsrc->rxq_grps = NULL;
}

/**
 * idpf_vport_queue_grp_rel_all - Release all queue groups
 * @rsrc: pointer to queue and vector resources
 */
static void idpf_vport_queue_grp_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
	idpf_txq_group_rel(rsrc);
	idpf_rxq_group_rel(rsrc);
}

/**
 * idpf_vport_queues_rel - Free memory for all queues
 * @vport: virtual port
 * @rsrc: pointer to queue and vector resources
 *
 * Free the memory allocated for queues associated to a vport
 */
void idpf_vport_queues_rel(struct idpf_vport *vport,
			   struct idpf_q_vec_rsrc *rsrc)
{
	idpf_xdp_copy_prog_to_rqs(rsrc, NULL);

	idpf_tx_desc_rel_all(rsrc);
	idpf_rx_desc_rel_all(rsrc);

	idpf_xdpsqs_put(vport);
	idpf_vport_queue_grp_rel_all(rsrc);

	kfree(vport->txqs);
	vport->txqs = NULL;
}

/**
 * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
 * @vport: vport to init txqs on
 * @rsrc: pointer to queue and vector resources
 *
 * We get a queue index from skb->queue_mapping and we need a fast way to
 * dereference the queue from queue groups. This allows us to quickly pull a
 * txq based on a queue index.
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport,
					  struct idpf_q_vec_rsrc *rsrc)
{
	struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
	struct work_struct *tstamp_task = &vport->tstamp_task;
	int k = 0;

	vport->txqs = kzalloc_objs(*vport->txqs, rsrc->num_txq);
	if (!vport->txqs)
		return -ENOMEM;

	vport->num_txq = rsrc->num_txq;
	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
		struct idpf_txq_group *tx_grp = &rsrc->txq_grps[i];

		for (unsigned int j = 0; j < tx_grp->num_txq; j++, k++) {
			vport->txqs[k] = tx_grp->txqs[j];
			vport->txqs[k]->idx = k;

			if (!caps)
				continue;

			vport->txqs[k]->cached_tstamp_caps = caps;
			vport->txqs[k]->tstamp_task = tstamp_task;
		}
	}

	return 0;
}

/**
 * idpf_vport_init_num_qs - Initialize number of queues
 * @vport: vport to initialize queues
 * @vport_msg: data to be filled into vport
 * @rsrc: pointer to queue and vector resources
 */
void idpf_vport_init_num_qs(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_msg,
			    struct idpf_q_vec_rsrc *rsrc)
{
	struct idpf_vport_user_config_data *config_data;
	u16 idx = vport->idx;

	config_data = &vport->adapter->vport_config[idx]->user_config;
	rsrc->num_txq = le16_to_cpu(vport_msg->num_tx_q);
	rsrc->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
	/* The number of txqs and rxqs in the config data will be zero only
	 * in the driver load path, and we don't update them thereafter.
	 */
	if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
	}

	if (idpf_is_queue_model_split(rsrc->txq_model))
		rsrc->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
	if (idpf_is_queue_model_split(rsrc->rxq_model))
		rsrc->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);

	vport->xdp_prog = config_data->xdp_prog;
	if (idpf_xdp_enabled(vport)) {
		rsrc->xdp_txq_offset = config_data->num_req_tx_qs;
		vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) -
				     rsrc->xdp_txq_offset;
		vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq);
	} else {
		rsrc->xdp_txq_offset = 0;
		vport->num_xdp_txq = 0;
		vport->xdpsq_share = false;
	}

	/* Adjust number of buffer queues per Rx queue group. */
	if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
		rsrc->num_bufqs_per_qgrp = 0;

		return;
	}

	rsrc->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
}

/**
 * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
 * @vport: vport to calculate descriptor counts for
 * @rsrc: pointer to queue and vector resources
 */
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
				struct idpf_q_vec_rsrc *rsrc)
{
	struct idpf_vport_user_config_data *config_data;
	u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
	u32 num_req_txq_desc, num_req_rxq_desc;
	u16 idx = vport->idx;

	config_data = &vport->adapter->vport_config[idx]->user_config;
	num_req_txq_desc = config_data->num_req_txq_desc;
	num_req_rxq_desc = config_data->num_req_rxq_desc;

	rsrc->complq_desc_count = 0;
	if (num_req_txq_desc) {
		rsrc->txq_desc_count = num_req_txq_desc;
		if (idpf_is_queue_model_split(rsrc->txq_model)) {
			rsrc->complq_desc_count = num_req_txq_desc;
			if (rsrc->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
				rsrc->complq_desc_count =
					IDPF_MIN_TXQ_COMPLQ_DESC;
		}
	} else {
		rsrc->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
		if (idpf_is_queue_model_split(rsrc->txq_model))
			rsrc->complq_desc_count =
				IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
	}

	if (num_req_rxq_desc)
		rsrc->rxq_desc_count = num_req_rxq_desc;
	else
		rsrc->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;

	for (unsigned int i = 0; i < num_bufqs; i++) {
		if (!rsrc->bufq_desc_count[i])
			rsrc->bufq_desc_count[i] =
				IDPF_RX_BUFQ_DESC_COUNT(rsrc->rxq_desc_count,
							num_bufqs);
	}
}

/**
 * idpf_vport_calc_total_qs - Calculate total number of queues
 * @adapter: private data struct
 * @vport_idx: vport idx to retrieve vport pointer
 * @vport_msg: message to fill with data
 * @max_q: vport max queue info
 *
 * Return: 0 on success, error value on failure.
 */
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
			     struct virtchnl2_create_vport *vport_msg,
			     struct idpf_vport_max_q *max_q)
{
	int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
	int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
	u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
	struct idpf_vport_user_config_data *user;
	struct idpf_vport_config *vport_config;
	u16 num_txq_grps, num_rxq_grps;
	u32 num_qs, num_xdpsq;

	vport_config = adapter->vport_config[vport_idx];
	if (vport_config) {
		num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
		num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
	} else {
		u32 num_cpus = netif_get_num_default_rss_queues();

		dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
		dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
		dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
		dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
	}

	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
		num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
		vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
						       IDPF_COMPLQ_PER_GROUP);
		vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
						  IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
	} else {
		num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
		num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
					 dflt_singleq_txqs);
		vport_msg->num_tx_q = cpu_to_le16(num_qs);
		vport_msg->num_tx_complq = 0;
	}
	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
		num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
		vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
						     IDPF_MAX_BUFQS_PER_RXQ_GRP);
		vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
						  IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
	} else {
		num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
		num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
					 dflt_singleq_rxqs);
		vport_msg->num_rx_q = cpu_to_le16(num_qs);
		vport_msg->num_rx_bufq = 0;
	}

	if (!vport_config)
		return 0;

	user = &vport_config->user_config;
	user->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
	user->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);

	if (vport_config->user_config.xdp_prog)
		num_xdpsq = libeth_xdpsq_num(user->num_req_rx_qs,
					     user->num_req_tx_qs,
					     vport_config->max_q.max_txq);
	else
		num_xdpsq = 0;

	vport_msg->num_tx_q = cpu_to_le16(user->num_req_tx_qs + num_xdpsq);
	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model)))
		vport_msg->num_tx_complq = vport_msg->num_tx_q;

	return 0;
}

/**
 * idpf_vport_calc_num_q_groups - Calculate number of queue groups
 * @rsrc: pointer to queue and vector resources
 */
void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc)
{
	if (idpf_is_queue_model_split(rsrc->txq_model))
		rsrc->num_txq_grp = rsrc->num_txq;
	else
		rsrc->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;

	if (idpf_is_queue_model_split(rsrc->rxq_model))
		rsrc->num_rxq_grp = rsrc->num_rxq;
	else
		rsrc->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
}

/**
 * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
 * @rsrc: pointer to queue and vector resources
 * @num_txq: return parameter for number of TX queues
 * @num_rxq: return parameter for number of RX queues
 */
static void idpf_vport_calc_numq_per_grp(struct idpf_q_vec_rsrc *rsrc,
					 u16 *num_txq, u16 *num_rxq)
{
	if (idpf_is_queue_model_split(rsrc->txq_model))
		*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
	else
		*num_txq = rsrc->num_txq;

	if (idpf_is_queue_model_split(rsrc->rxq_model))
		*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
	else
		*num_rxq = rsrc->num_rxq;
}

/**
 * idpf_rxq_set_descids - set the descids supported by this queue
 * @rsrc: pointer to queue and vector resources
 * @q: rx queue for which descids are set
 */
static void idpf_rxq_set_descids(struct idpf_q_vec_rsrc *rsrc,
				 struct idpf_rx_queue *q)
{
	if (idpf_is_queue_model_split(rsrc->rxq_model))
		return;

	if (rsrc->base_rxd)
		q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
	else
		q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
}

/**
 * idpf_txq_group_alloc - Allocate all txq group resources
 * @vport: vport to allocate txq groups for
 * @rsrc: pointer to queue and vector resources
 * @num_txq: number of txqs to allocate for each group
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_txq_group_alloc(struct idpf_vport *vport,
				struct idpf_q_vec_rsrc *rsrc,
				u16 num_txq)
{
	bool split, flow_sch_en;

	rsrc->txq_grps = kzalloc_objs(*rsrc->txq_grps, rsrc->num_txq_grp);
	if (!rsrc->txq_grps)
		return -ENOMEM;

	split = idpf_is_queue_model_split(rsrc->txq_model);
	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
				       VIRTCHNL2_CAP_SPLITQ_QSCHED);

	for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
		struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
		struct idpf_adapter *adapter = vport->adapter;

		tx_qgrp->vport = vport;
		tx_qgrp->num_txq = num_txq;

		for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
			tx_qgrp->txqs[j] = kzalloc_obj(*tx_qgrp->txqs[j]);
			if (!tx_qgrp->txqs[j])
				goto err_alloc;
		}

		for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
			struct idpf_tx_queue *q = tx_qgrp->txqs[j];

			q->dev = &adapter->pdev->dev;
			q->desc_count = rsrc->txq_desc_count;
			q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
			q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
			q->netdev = vport->netdev;
			q->txq_grp = tx_qgrp;
			q->rel_q_id = j;

			if (!split) {
				q->clean_budget = vport->compln_clean_budget;
				idpf_queue_assign(CRC_EN, q,
						  vport->crc_enable);
			}

			if (!flow_sch_en)
				continue;

			idpf_queue_set(FLOW_SCH_EN, q);

			q->refillq = kzalloc_obj(*q->refillq);
			if (!q->refillq)
				goto err_alloc;

			idpf_queue_set(GEN_CHK, q->refillq);
			idpf_queue_set(RFL_GEN_CHK, q->refillq);
		}

		if (!split)
			continue;

		tx_qgrp->complq = kzalloc_objs(*tx_qgrp->complq,
					       IDPF_COMPLQ_PER_GROUP);
		if (!tx_qgrp->complq)
			goto err_alloc;

		tx_qgrp->complq->desc_count = rsrc->complq_desc_count;
		tx_qgrp->complq->txq_grp = tx_qgrp;
		tx_qgrp->complq->netdev = vport->netdev;
		tx_qgrp->complq->clean_budget = vport->compln_clean_budget;

		if (flow_sch_en)
			idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
	}

	return 0;

err_alloc:
	idpf_txq_group_rel(rsrc);

	return -ENOMEM;
}

/**
 * idpf_rxq_group_alloc - Allocate all rxq group resources
 * @vport: vport to allocate rxq groups for
 * @rsrc: pointer to queue and vector resources
 * @num_rxq: number of rxqs to allocate for each group
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_rxq_group_alloc(struct idpf_vport *vport,
				struct idpf_q_vec_rsrc *rsrc,
				u16 num_rxq)
{
	struct idpf_adapter *adapter = vport->adapter;
	bool hs, rsc;
	int err = 0;

	rsrc->rxq_grps = kzalloc_objs(struct idpf_rxq_group, rsrc->num_rxq_grp);
	if (!rsrc->rxq_grps)
		return -ENOMEM;

	hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	rsc = idpf_is_feature_ena(vport, NETIF_F_GRO_HW);

	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
		struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];

		rx_qgrp->vport = vport;
		if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
			rx_qgrp->singleq.num_rxq = num_rxq;
			for (unsigned int j = 0; j < num_rxq; j++) {
				rx_qgrp->singleq.rxqs[j] =
					kzalloc_obj(*rx_qgrp->singleq.rxqs[j]);
				if (!rx_qgrp->singleq.rxqs[j]) {
					err = -ENOMEM;
					goto err_alloc;
				}
			}
			goto skip_splitq_rx_init;
		}
		rx_qgrp->splitq.num_rxq_sets = num_rxq;

		for (unsigned int j = 0; j < num_rxq; j++) {
			rx_qgrp->splitq.rxq_sets[j] =
				kzalloc_obj(struct idpf_rxq_set);
			if (!rx_qgrp->splitq.rxq_sets[j]) {
				err = -ENOMEM;
				goto err_alloc;
			}
		}

		rx_qgrp->splitq.bufq_sets = kzalloc_objs(struct idpf_bufq_set,
							 rsrc->num_bufqs_per_qgrp);
		if (!rx_qgrp->splitq.bufq_sets) {
			err = -ENOMEM;
			goto err_alloc;
		}
		rx_qgrp->splitq.num_bufq_sets = rsrc->num_bufqs_per_qgrp;

		for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
			struct idpf_bufq_set *bufq_set =
				&rx_qgrp->splitq.bufq_sets[j];
			int swq_size = sizeof(struct idpf_sw_queue);
			struct idpf_buf_queue *q;

			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
			q->desc_count = rsrc->bufq_desc_count[j];
			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;

			idpf_queue_assign(HSPLIT_EN, q, hs);
			idpf_queue_assign(RSC_EN, q, rsc);

			bufq_set->num_refillqs = num_rxq;
			bufq_set->refillqs = kcalloc(num_rxq, swq_size,
						     GFP_KERNEL);
			if (!bufq_set->refillqs) {
				err = -ENOMEM;
				goto err_alloc;
			}
			for (unsigned int k = 0; k < bufq_set->num_refillqs; k++) {
				struct idpf_sw_queue *refillq =
					&bufq_set->refillqs[k];

				refillq->desc_count =
					rsrc->bufq_desc_count[j];
				idpf_queue_set(GEN_CHK, refillq);
				idpf_queue_set(RFL_GEN_CHK, refillq);
				refillq->ring = kzalloc_objs(*refillq->ring,
							     refillq->desc_count);
				if (!refillq->ring) {
					err = -ENOMEM;
					goto err_alloc;
				}
			}
		}

skip_splitq_rx_init:
		for (unsigned int j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;

			if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
				q = rx_qgrp->singleq.rxqs[j];
				q->rx_ptype_lkup = adapter->singleq_pt_lkup;
				goto setup_rxq;
			}
			q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
				&rx_qgrp->splitq.bufq_sets[0].refillqs[j];
			if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
				rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
					&rx_qgrp->splitq.bufq_sets[1].refillqs[j];

			idpf_queue_assign(HSPLIT_EN, q, hs);
			idpf_queue_assign(RSC_EN, q, rsc);
			q->rx_ptype_lkup = adapter->splitq_pt_lkup;

setup_rxq:
			q->desc_count = rsrc->rxq_desc_count;
			q->bufq_sets = rx_qgrp->splitq.bufq_sets;
			q->idx = (i * num_rxq) + j;
			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
			q->rx_max_pkt_size = vport->netdev->mtu +
					     LIBETH_RX_LL_LEN;
			idpf_rxq_set_descids(rsrc, q);
		}
	}

err_alloc:
	if (err)
		idpf_rxq_group_rel(rsrc);

	return err;
}

/**
 * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
 * @vport: vport with qgrps to allocate
 * @rsrc: pointer to queue and vector resources
 *
 * Return: 0 on success, negative on failure
 */
static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport,
					  struct idpf_q_vec_rsrc *rsrc)
{
	u16 num_txq, num_rxq;
	int err;

	idpf_vport_calc_numq_per_grp(rsrc, &num_txq, &num_rxq);

	err = idpf_txq_group_alloc(vport, rsrc, num_txq);
	if (err)
		goto err_out;

	err = idpf_rxq_group_alloc(vport, rsrc, num_rxq);
	if (err)
		goto err_out;

	return 0;

err_out:
	idpf_vport_queue_grp_rel_all(rsrc);

	return err;
}

/**
 * idpf_vport_queues_alloc - Allocate memory for all queues
 * @vport: virtual port
 * @rsrc: pointer to queue and vector resources
 *
 * Allocate memory for queues associated with a vport.
 *
 * Return: 0 on success, negative on failure.
 */
int idpf_vport_queues_alloc(struct idpf_vport *vport,
			    struct idpf_q_vec_rsrc *rsrc)
{
	int err;

	err = idpf_vport_queue_grp_alloc_all(vport, rsrc);
	if (err)
		goto err_out;

	err = idpf_vport_init_fast_path_txqs(vport, rsrc);
	if (err)
		goto err_out;

	err = idpf_xdpsqs_get(vport);
	if (err)
		goto err_out;

	err = idpf_tx_desc_alloc_all(vport, rsrc);
	if (err)
		goto err_out;

	err = idpf_rx_desc_alloc_all(vport, rsrc);
	if (err)
		goto err_out;

	return 0;

err_out:
	idpf_vport_queues_rel(vport, rsrc);

	return err;
}

/**
 * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value
 * @txq: queue to read the timestamp from
 * @skb: socket buffer to provide Tx timestamp value
 *
 * Schedule a work to read Tx timestamp value generated once the packet is
 * transmitted.
 */
static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
{
	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct idpf_ptp_tx_tstamp_status *tx_tstamp_status;

	tx_tstamp_caps = txq->cached_tstamp_caps;
	spin_lock_bh(&tx_tstamp_caps->status_lock);

	for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) {
		tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i];
		if (tx_tstamp_status->state != IDPF_PTP_FREE)
			continue;

		tx_tstamp_status->skb = skb;
		tx_tstamp_status->state = IDPF_PTP_REQUEST;

		/* Fetch timestamp from completion descriptor through
		 * virtchnl msg to report to stack.
		 */
		queue_work(system_unbound_wq, txq->tstamp_task);
		break;
	}

	spin_unlock_bh(&tx_tstamp_caps->status_lock);
}

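/* Advance the next-to-clean index while keeping the descriptor and buffer
 * cursors in sync, wrapping all three back to the start of the ring at the
 * end. A macro rather than a helper (rationale inferred, not stated in the
 * source) because it must update the caller's local ntc/desc/buf in place.
 */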
#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf)	\
do {								\
	if (unlikely(++(ntc) == (txq)->desc_count)) {		\
		ntc = 0;					\
		buf = (txq)->tx_buf;				\
		desc = &(txq)->flex_tx[0];			\
	} else {						\
		(buf)++;					\
		(desc)++;					\
	}							\
} while (0)

/**
 * idpf_tx_splitq_clean - Reclaim resources from buffer queue
 * @tx_q: Tx queue to clean
 * @end: queue index until which it should be cleaned
 * @napi_budget: Used to determine if we are in netpoll
 * @cleaned: pointer to stats struct to track cleaned packets/bytes
 * @descs_only: true if queue is using flow-based scheduling and should
 * not clean buffers at this time
 *
 * Cleans the queue descriptor ring. If the queue is using queue-based
 * scheduling, the buffers will be cleaned as well. If the queue is using
 * flow-based scheduling, only the descriptors are cleaned at this time.
 * Separate packet completion events will be reported on the completion queue,
 * and the buffers will be cleaned separately. The stats are not updated from
 * this function when using flow-based scheduling.
 */
static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
				 int napi_budget,
				 struct libeth_sq_napi_stats *cleaned,
				 bool descs_only)
{
	union idpf_tx_flex_desc *next_pending_desc = NULL;
	union idpf_tx_flex_desc *tx_desc;
	u32 ntc = tx_q->next_to_clean;
	struct libeth_cq_pp cp = {
		.dev = tx_q->dev,
		.ss = cleaned,
		.napi = napi_budget,
	};
	struct idpf_tx_buf *tx_buf;

	if (descs_only) {
		/* Bump ring index to mark as cleaned. */
		tx_q->next_to_clean = end;
		return;
	}

	tx_desc = &tx_q->flex_tx[ntc];
	next_pending_desc = &tx_q->flex_tx[end];
	tx_buf = &tx_q->tx_buf[ntc];

	while (tx_desc != next_pending_desc) {
		u32 eop_idx;

		/* If this entry in the ring was used as a context descriptor,
		 * its corresponding entry in the buffer ring is reserved. We
		 * can skip this descriptor since there is no buffer to clean.
		 */
		if (tx_buf->type <= LIBETH_SQE_CTX)
			goto fetch_next_txq_desc;

		if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
			break;

		eop_idx = tx_buf->rs_idx;
		libeth_tx_complete(tx_buf, &cp);

		/* unmap remaining buffers */
		while (ntc != eop_idx) {
			idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
						      tx_desc, tx_buf);

			/* unmap any remaining paged data */
			libeth_tx_complete(tx_buf, &cp);
		}

fetch_next_txq_desc:
		idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
	}

	tx_q->next_to_clean = ntc;
}

/**
 * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
 * @txq: queue to clean
 * @buf_id: packet's starting buffer ID, from completion descriptor
 * @cleaned: pointer to stats struct to track cleaned packets/bytes
 * @budget: Used to determine if we are in netpoll
 *
 * Clean all buffers associated with the packet starting at @buf_id. The
 * byte/segment counts for the cleaned packet are added to @cleaned.
 */
static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
			       struct libeth_sq_napi_stats *cleaned,
			       int budget)
{
	struct idpf_tx_buf *tx_buf = NULL;
	struct libeth_cq_pp cp = {
		.dev = txq->dev,
		.ss = cleaned,
		.napi = budget,
	};

	tx_buf = &txq->tx_buf[buf_id];
	if (tx_buf->type == LIBETH_SQE_SKB) {
		if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
			idpf_tx_read_tstamp(txq, tx_buf->skb);

		libeth_tx_complete(tx_buf, &cp);
		idpf_post_buf_refill(txq->refillq, buf_id);
	}

	while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
		buf_id = idpf_tx_buf_next(tx_buf);

		tx_buf = &txq->tx_buf[buf_id];
		libeth_tx_complete(tx_buf, &cp);
		idpf_post_buf_refill(txq->refillq, buf_id);
	}
}

/**
 * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
 * whether on the buffer ring or in the hash table
 * @txq: Tx ring to clean
 * @desc: pointer to completion queue descriptor to extract completion
 * information from
 * @cleaned: pointer to stats struct to track cleaned packets/bytes
 * @budget: Used to determine if we are in netpoll
 *
 * The bytes/packets cleaned are added to @cleaned.
 */
static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
					 struct idpf_splitq_tx_compl_desc *desc,
					 struct libeth_sq_napi_stats *cleaned,
					 int budget)
{
	/* RS completion contains queue head for queue based scheduling or
	 * completion tag for flow based scheduling.
	 */
	u16 rs_compl_val = le16_to_cpu(desc->common.q_head_compl_tag.q_head);

	if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
		idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false);
		return;
	}

	idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget);
}

/**
 * idpf_tx_clean_complq - Reclaim resources on completion queue
 * @complq: Tx ring to clean
 * @budget: Used to determine if we are in netpoll
 * @cleaned: returns number of packets cleaned
 *
 * Return: %true if there's any budget left (i.e. the clean is finished)
 */
static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
				 int *cleaned)
{
	struct idpf_splitq_tx_compl_desc *tx_desc;
	s16 ntc = complq->next_to_clean;
	struct idpf_netdev_priv *np;
	unsigned int complq_budget;
	bool complq_ok = true;
	int i;

	complq_budget = complq->clean_budget;
	tx_desc = &complq->comp[ntc];
	ntc -= complq->desc_count;

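	/* HW flips the generation bit in the descriptors it writes each
	 * time it wraps the completion ring; the driver mirrors that bit
	 * in GEN_CHK and toggles its copy when next_to_clean wraps below.
	 * A descriptor whose gen bit differs from GEN_CHK has therefore
	 * not been written yet on this pass, and the loop stops there.
	 */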
	do {
		struct libeth_sq_napi_stats cleaned_stats = { };
		struct idpf_tx_queue *tx_q;
		__le16 hw_head;
		int rel_tx_qid;
		u8 ctype;	/* completion type */
		u16 gen;

		/* if the descriptor isn't done, no work yet to do */
		gen = le16_get_bits(tx_desc->common.qid_comptype_gen,
				    IDPF_TXD_COMPLQ_GEN_M);
		if (idpf_queue_has(GEN_CHK, complq) != gen)
			break;

		/* Find necessary info of TX queue to clean buffers */
		rel_tx_qid = le16_get_bits(tx_desc->common.qid_comptype_gen,
					   IDPF_TXD_COMPLQ_QID_M);
		if (rel_tx_qid >= complq->txq_grp->num_txq ||
		    !complq->txq_grp->txqs[rel_tx_qid]) {
			netdev_err(complq->netdev, "TxQ not found\n");
			goto fetch_next_desc;
		}
		tx_q = complq->txq_grp->txqs[rel_tx_qid];

		/* Determine completion type */
		ctype = le16_get_bits(tx_desc->common.qid_comptype_gen,
				      IDPF_TXD_COMPLQ_COMPL_TYPE_M);
		switch (ctype) {
		case IDPF_TXD_COMPLT_RE:
			hw_head = tx_desc->common.q_head_compl_tag.q_head;

			idpf_tx_splitq_clean(tx_q, le16_to_cpu(hw_head),
					     budget, &cleaned_stats, true);
			break;
		case IDPF_TXD_COMPLT_RS:
			idpf_tx_handle_rs_completion(tx_q, tx_desc,
						     &cleaned_stats, budget);
			break;
		default:
			netdev_err(tx_q->netdev,
				   "Unknown TX completion type: %d\n", ctype);
			goto fetch_next_desc;
		}

		u64_stats_update_begin(&tx_q->stats_sync);
		u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
		u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
		tx_q->cleaned_pkts += cleaned_stats.packets;
		tx_q->cleaned_bytes += cleaned_stats.bytes;
		complq->num_completions++;
		u64_stats_update_end(&tx_q->stats_sync);

fetch_next_desc:
		tx_desc++;
		ntc++;
		if (unlikely(!ntc)) {
			ntc -= complq->desc_count;
			tx_desc = &complq->comp[0];
			idpf_queue_change(GEN_CHK, complq);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		complq_budget--;
	} while (likely(complq_budget));

	/* Store the state of the complq to be used later in deciding if a
	 * TXQ can be started again
	 */
	if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
		complq_ok = false;

	np = netdev_priv(complq->netdev);
	for (i = 0; i < complq->txq_grp->num_txq; ++i) {
		struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
		struct netdev_queue *nq;
		bool dont_wake;

		/* We didn't clean anything on this queue, move along */
		if (!tx_q->cleaned_bytes)
			continue;

		*cleaned += tx_q->cleaned_pkts;

		/* Update BQL */
		nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);

		dont_wake = !complq_ok || !test_bit(IDPF_VPORT_UP, np->state) ||
			    !netif_carrier_ok(tx_q->netdev);
		/* Check if the TXQ needs to and can be restarted */
		__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
					   IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
					   dont_wake);

		/* Reset cleaned stats for the next time this queue is
		 * cleaned
		 */
		tx_q->cleaned_bytes = 0;
		tx_q->cleaned_pkts = 0;
	}

	ntc += complq->desc_count;
	complq->next_to_clean = ntc;

	return !!complq_budget;
}

/**
 * idpf_wait_for_sw_marker_completion - wait for SW marker of disabled Tx queue
 * @txq: disabled Tx queue
 *
 * When a Tx queue is requested for disabling, the CP sends a special
 * completion descriptor called "SW marker", meaning the queue is ready to be
 * destroyed. If, for some reason, the marker is not received within 500 ms,
 * break the polling to not hang the driver.
 */
void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq)
{
	struct idpf_compl_queue *complq;
	unsigned long timeout;
	bool flow, gen_flag;
	u32 ntc;

	if (!idpf_queue_has(SW_MARKER, txq))
		return;

	complq = idpf_queue_has(XDP, txq) ? txq->complq : txq->txq_grp->complq;
	ntc = complq->next_to_clean;

	flow = idpf_queue_has(FLOW_SCH_EN, complq);
	gen_flag = idpf_queue_has(GEN_CHK, complq);

	timeout = jiffies + msecs_to_jiffies(IDPF_WAIT_FOR_MARKER_TIMEO);

	do {
		struct idpf_splitq_4b_tx_compl_desc *tx_desc;
		struct idpf_tx_queue *target = NULL;
		u32 ctype_gen, id;

		tx_desc = flow ? &complq->comp[ntc].common :
				 &complq->comp_4b[ntc];
		ctype_gen = le16_to_cpu(tx_desc->qid_comptype_gen);

		if (!!(ctype_gen & IDPF_TXD_COMPLQ_GEN_M) != gen_flag) {
			usleep_range(500, 1000);
			continue;
		}

		if (FIELD_GET(IDPF_TXD_COMPLQ_COMPL_TYPE_M, ctype_gen) !=
		    IDPF_TXD_COMPLT_SW_MARKER)
			goto next;

		id = FIELD_GET(IDPF_TXD_COMPLQ_QID_M, ctype_gen);
		target = complq->txq_grp->txqs[id];

		idpf_queue_clear(SW_MARKER, target);

next:
		if (unlikely(++ntc == complq->desc_count)) {
			ntc = 0;
			gen_flag = !gen_flag;
		}
		if (target == txq)
			break;
	} while (time_before(jiffies, timeout));

	idpf_queue_assign(GEN_CHK, complq, gen_flag);
	complq->next_to_clean = ntc;
}

/**
 * idpf_tx_splitq_build_ctb - populate command tag and size for queue
 * based scheduling descriptors
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size)
{
	desc->q.qw1.cmd_dtype =
		le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
	desc->q.qw1.cmd_dtype |=
		le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
	desc->q.qw1.buf_size = cpu_to_le16(size);
	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
}

/**
 * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
 * scheduling descriptors
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size)
{
	*(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd);
	desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}

/**
 * idpf_txq_has_room - check if enough Tx splitq resources are available
 * @tx_q: the queue to be checked
 * @descs_needed: number of descriptors required for this packet
 * @bufs_needed: number of Tx buffers required for this packet
 *
 * Return: 0 if no room available, 1 otherwise
 */
static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
			     u32 bufs_needed)
{
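	/* Three resources are consulted (all visible below): free data
	 * descriptors, headroom on the completion queue vs. its overflow
	 * threshold, and free buffer IDs in the refill queue. Exhausting
	 * any one of them means the packet cannot be queued yet.
	 */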
	if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
	    IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
	    IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
	    idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
		return 0;
	return 1;
}

/**
 * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
 * @tx_q: the queue to be checked
 * @descs_needed: number of descriptors required for this packet
 * @bufs_needed: number of buffers needed for this packet
 *
 * Return: 0 if stop is not needed
 */
static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
				     u32 descs_needed,
				     u32 bufs_needed)
{
	/* Since we have multiple resources to check for splitq, the
	 * start/stop thresholds become a boolean check instead of a count
	 * threshold.
	 */
	if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
				      idpf_txq_has_room(tx_q, descs_needed,
							bufs_needed),
				      1, 1))
		return 0;

	u64_stats_update_begin(&tx_q->stats_sync);
	u64_stats_inc(&tx_q->q_stats.q_busy);
	u64_stats_update_end(&tx_q->stats_sync);

	return -EBUSY;
}

/**
 * idpf_tx_buf_hw_update - Store the new tail value
 * @tx_q: queue to bump
 * @val: new tail index
 * @xmit_more: more skb's pending
 *
 * The naming here is special in that 'hw' signals that this function is about
 * to do a register write to update our queue status. We know this can only
 * mean tail here as HW should be owning head for TX.
 */
void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
			   bool xmit_more)
{
	struct netdev_queue *nq;

	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
	tx_q->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* notify HW of packet */
	if (netif_xmit_stopped(nq) || !xmit_more)
		writel(val, tx_q->tail);
}

/**
 * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
 * @txq: queue to send buffer on
 * @skb: send buffer
 * @bufs_needed: (output) number of buffers needed for this skb.
 *
 * Return: number of data descriptors and buffers needed for this skb.
 */
unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
					struct sk_buff *skb,
					u32 *bufs_needed)
{
	const struct skb_shared_info *shinfo;
	unsigned int count = 0, i;

	count += !!skb_headlen(skb);

	if (!skb_is_nonlinear(skb))
		return count;

	shinfo = skb_shinfo(skb);
	*bufs_needed += shinfo->nr_frags;
	for (i = 0; i < shinfo->nr_frags; i++) {
		unsigned int size;

		size = skb_frag_size(&shinfo->frags[i]);

		/* We only need to use the idpf_size_to_txd_count check if the
		 * fragment is going to span multiple descriptors,
		 * i.e. size >= 16K.
		 */
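		/* For scale (consistent with the worked example in
		 * idpf_tx_splitq_map() below): a descriptor carries at most
		 * 16K - 1 bytes, so e.g. a 32K fragment spans several
		 * descriptors and idpf_size_to_txd_count() returns how many.
		 */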
		if (size >= SZ_16K)
			count += idpf_size_to_txd_count(size);
		else
			count++;
	}

	if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
		if (__skb_linearize(skb))
			return 0;

		count = idpf_size_to_txd_count(skb->len);
		u64_stats_update_begin(&txq->stats_sync);
		u64_stats_inc(&txq->q_stats.linearize);
		u64_stats_update_end(&txq->stats_sync);
	}

	return count;
}

/**
 * idpf_tx_splitq_bump_ntu - adjust NTU and generation
 * @txq: the tx ring to wrap
 * @ntu: ring index to bump
 *
 * Return: the next ring index, wrapping to 0 at the end of the ring
 */
static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
	ntu++;

	if (ntu == txq->desc_count)
		ntu = 0;

	return ntu;
}

/**
 * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
 * @refillq: refill queue to get buffer ID from
 * @buf_id: return buffer ID
 *
 * Return: true if a buffer ID was found, false if not
 */
static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
				    u32 *buf_id)
{
	u32 ntc = refillq->next_to_clean;
	u32 refill_desc;

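	/* Each refill ring entry packs a buffer ID (IDPF_RFL_BI_BUFID_M)
	 * together with a generation bit (IDPF_RFL_BI_GEN_M). As on the
	 * completion ring, an entry whose gen bit does not match the
	 * driver's RFL_GEN_CHK copy has not been reposted by the cleaning
	 * side yet, i.e. no free buffer ID is available right now.
	 */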
	refill_desc = refillq->ring[ntc];

	if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
		     !!(refill_desc & IDPF_RFL_BI_GEN_M)))
		return false;

	*buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);

	if (unlikely(++ntc == refillq->desc_count)) {
		idpf_queue_change(RFL_GEN_CHK, refillq);
		ntc = 0;
	}

	refillq->next_to_clean = ntc;

	return true;
}

/**
 * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
 * @txq: Tx queue to unwind
 * @params: pointer to splitq params struct
 * @first: starting buffer for packet to unmap
 */
static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
					 struct idpf_tx_splitq_params *params,
					 struct idpf_tx_buf *first)
{
	struct idpf_sw_queue *refillq = txq->refillq;
	struct libeth_sq_napi_stats ss = { };
	struct idpf_tx_buf *tx_buf = first;
	struct libeth_cq_pp cp = {
		.dev = txq->dev,
		.ss = &ss,
	};

	u64_stats_update_begin(&txq->stats_sync);
	u64_stats_inc(&txq->q_stats.dma_map_errs);
	u64_stats_update_end(&txq->stats_sync);

	libeth_tx_complete(tx_buf, &cp);
	while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
		tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)];
		libeth_tx_complete(tx_buf, &cp);
	}

	/* Update tail in case netdev_xmit_more was previously true. */
	idpf_tx_buf_hw_update(txq, params->prev_ntu, false);

	if (!refillq)
		return;

	/* Restore refillq state to avoid leaking tags. */
	if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq))
		idpf_queue_change(RFL_GEN_CHK, refillq);
	refillq->next_to_clean = params->prev_refill_ntc;
}

/**
 * idpf_tx_splitq_map - Build the Tx flex descriptor
 * @tx_q: queue to send buffer on
 * @params: pointer to splitq params struct
 * @first: first buffer info buffer to use
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit flex descriptor.
 */
static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
			       struct idpf_tx_splitq_params *params,
			       struct idpf_tx_buf *first)
{
	union idpf_tx_flex_desc *tx_desc;
	unsigned int data_len, size;
	struct idpf_tx_buf *tx_buf;
	u16 i = tx_q->next_to_use;
	struct netdev_queue *nq;
	struct sk_buff *skb;
	skb_frag_t *frag;
	u32 next_buf_id;
	u16 td_cmd = 0;
	dma_addr_t dma;

	skb = first->skb;

	td_cmd = params->offload.td_cmd;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = &tx_q->flex_tx[i];

	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;
	first->nr_frags = 0;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;

		if (unlikely(dma_mapping_error(tx_q->dev, dma))) {
			idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
			return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
							    first);
		}

		first->nr_frags++;
		tx_buf->type = LIBETH_SQE_FRAG;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* buf_addr is in same location for both desc types */
		tx_desc->q.buf_addr = cpu_to_le64(dma);

		/* The stack can send us fragments that are too large for a
		 * single descriptor i.e. frag size > 16K-1. We will need to
		 * split the fragment across multiple descriptors in this case.
		 * To adhere to HW alignment restrictions, the fragment needs
		 * to be split such that the first chunk ends on a 4K boundary
		 * and all subsequent chunks start on a 4K boundary. We still
		 * want to send as much data as possible though, so our
		 * intermediate descriptor chunk size will be 12K.
		 *
		 * For example, consider a 32K fragment mapped to DMA addr 2600.
		 * ------------------------------------------------------------
		 * |                        frag_size = 32K                   |
		 * ------------------------------------------------------------
		 * |2600              |16384            |28672
		 *
		 * 3 descriptors will be used for this fragment. The HW expects
		 * the descriptors to contain the following:
		 * ------------------------------------------------------------
		 * | size = 13784     | size = 12K      | size = 6696         |
		 * | dma = 2600       | dma = 16384     | dma = 28672         |
		 * ------------------------------------------------------------
		 *
		 * We need to first adjust the max_data for the first chunk so
		 * that it ends on a 4K boundary. By negating the value of the
		 * DMA address and taking only the low order bits, we're
		 * effectively calculating
		 *	4K - (DMA addr lower order bits) =
		 *				bytes to next boundary.
		 *
		 * Add that to our base aligned max_data (12K) and we have
		 * our first chunk size. In the example above,
		 *	13784 = 12K + (4096-2600)
		 *
		 * After guaranteeing the first chunk ends on a 4K boundary, we
		 * will give the intermediate descriptors 12K chunks and
		 * whatever is left to the final descriptor. This ensures that
		 * all descriptors used for the remaining chunks of the
		 * fragment start on a 4K boundary and we use as few
		 * descriptors as possible.
		 */
		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
		while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
			idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
						  max_data);

			if (unlikely(++i == tx_q->desc_count)) {
				tx_desc = &tx_q->flex_tx[0];
				i = 0;
			} else {
				tx_desc++;
			}

			/* Adjust the DMA offset and the remaining size of the
			 * fragment. On the first iteration of this loop,
			 * max_data will be >= 12K and <= 16K-1. On any
			 * subsequent iteration of this loop, max_data will
			 * always be 12K.
			 */
			dma += max_data;
			size -= max_data;

			/* Reset max_data since remaining chunks will be 12K
			 * at most
			 */
			max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;

			/* buf_addr is in same location for both desc types */
			tx_desc->q.buf_addr = cpu_to_le64(dma);
		}

		if (!data_len)
			break;

		idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);

		if (unlikely(++i == tx_q->desc_count)) {
			tx_desc = &tx_q->flex_tx[0];
			i = 0;
		} else {
			tx_desc++;
		}

		if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
			if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
							      &next_buf_id))) {
				idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
				return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
								    first);
			}
		} else {
			next_buf_id = i;
		}
		idpf_tx_buf_next(tx_buf) = next_buf_id;
		tx_buf = &tx_q->tx_buf[next_buf_id];

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
				       DMA_TO_DEVICE);
	}

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(skb);

	first->type = LIBETH_SQE_SKB;

	/* write last descriptor with RS and EOP bits */
	first->rs_idx = i;
	idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
	td_cmd |= params->eop_cmd;
	idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
	i = idpf_tx_splitq_bump_ntu(tx_q, i);

	tx_q->txq_grp->num_completions_pending++;

	/* record bytecount for BQL */
	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
	netdev_tx_sent_queue(nq, first->bytes);

	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}

/**
 * idpf_tso - computes mss and TSO length to prepare for TSO
 * @skb: pointer to skb
 * @off: pointer to struct that holds offload parameters
 *
 * Return: error (negative) if TSO was requested but cannot be applied to the
 * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
 */
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
{
	const struct skb_shared_info *shinfo;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_start;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	shinfo = skb_shinfo(skb);

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else if (ip.v6->version == 6) {
		ip.v6->payload_len = 0;
	}

	l4_start = skb_transport_offset(skb);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

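	/* The stack seeds the L4 checksum with a pseudo-header covering the
	 * full payload length; the csum_replace_by_diff() calls below strip
	 * that length back out so HW can fold in each segment's own length
	 * while segmenting.
	 */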
	switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
	case SKB_GSO_TCPV4:
	case SKB_GSO_TCPV6:
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
		break;
	case SKB_GSO_UDP_L4:
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of segmentation header */
		off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
		l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
		break;
	default:
		return -EINVAL;
	}

	off->tso_len = skb->len - off->tso_hdr_len;
	off->mss = shinfo->gso_size;
	off->tso_segs = shinfo->gso_segs;

	off->tx_flags |= IDPF_TX_FLAGS_TSO;

	return 1;
}

/**
 * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
 * @txq: queue to put context descriptor on
 *
 * Since the Tx buffer ring mimics the descriptor ring, update the Tx buffer
 * ring entry to reflect that this index is a context descriptor.
 *
 * Return: pointer to the next descriptor
 */
static union idpf_flex_tx_ctx_desc *
idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
{
	union idpf_flex_tx_ctx_desc *desc;
	int i = txq->next_to_use;

	/* grab the next descriptor */
	desc = &txq->flex_ctx[i];
	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);

	return desc;
}

/**
 * idpf_tx_drop_skb - free the SKB and bump tail if necessary
 * @tx_q: queue to send buffer on
 * @skb: pointer to skb
 *
 * Return: always NETDEV_TX_OK
 */
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
{
	u64_stats_update_begin(&tx_q->stats_sync);
	u64_stats_inc(&tx_q->q_stats.skb_drops);
	u64_stats_update_end(&tx_q->stats_sync);

	idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

#if (IS_ENABLED(CONFIG_PTP_1588_CLOCK))
/**
 * idpf_tx_tstamp - set up context descriptor for hardware timestamp
 * @tx_q: queue to send buffer on
 * @skb: pointer to the SKB we're sending
 * @off: pointer to the offload struct
 *
 * Return: Positive index number on success, negative otherwise.
 */
static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
			  struct idpf_tx_offload_params *off)
{
	int err, idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return -1;

	if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
		return -1;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (off->tx_flags & IDPF_TX_FLAGS_TSO)
		return -1;

	/* Grab an open timestamp slot */
	err = idpf_ptp_request_ts(tx_q, skb, &idx);
	if (err) {
		u64_stats_update_begin(&tx_q->stats_sync);
		u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
		u64_stats_update_end(&tx_q->stats_sync);

		return -1;
	}

	off->tx_flags |= IDPF_TX_FLAGS_TSYN;

	return idx;
}

/**
 * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate
 *			     PHY Tx timestamp
 * @ctx_desc: Context descriptor
 * @idx: Index of the Tx timestamp latch
 */
static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
				    u32 idx)
{
	ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX,
					      IDPF_TX_CTX_DTYPE_M) |
			     le64_encode_bits(IDPF_TX_CTX_DESC_TSYN,
					      IDPF_TX_CTX_CMD_M) |
			     le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
}
#else /* CONFIG_PTP_1588_CLOCK */
static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
			  struct idpf_tx_offload_params *off)
{
	return -1;
}

static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
				    u32 idx)
{ }
#endif /* CONFIG_PTP_1588_CLOCK */

/**
 * idpf_tx_splitq_need_re - check whether RE bit needs to be set
 * @tx_q: pointer to Tx queue
 *
 * Return: true if RE bit needs to be set, false otherwise
 */
static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
{
	int gap = tx_q->next_to_use - tx_q->last_re;

	gap += (gap < 0) ? tx_q->desc_count : 0;
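
	/* Illustration (sizes for example only): with desc_count = 512,
	 * last_re = 480 and next_to_use = 40, gap = 40 - 480 + 512 = 72,
	 * i.e. 72 descriptors have been used since the last RE, and an RE
	 * is requested once that reaches IDPF_TX_SPLITQ_RE_MIN_GAP.
	 */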
	return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
}

/**
 * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
 * @skb: send buffer
 * @tx_q: queue to send buffer on
 *
 * Return: NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
					struct idpf_tx_queue *tx_q)
{
	struct idpf_tx_splitq_params tx_params = {
		.prev_ntu = tx_q->next_to_use,
	};
	union idpf_flex_tx_ctx_desc *ctx_desc;
	struct idpf_tx_buf *first;
	u32 count, buf_count = 1;
	int tso, idx;
	u32 buf_id;

	count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
	if (unlikely(!count))
		return idpf_tx_drop_skb(tx_q, skb);

	tso = idpf_tso(skb, &tx_params.offload);
	if (unlikely(tso < 0))
		return idpf_tx_drop_skb(tx_q, skb);

	/* Check for splitq specific TX resources */
	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
	if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);

		return NETDEV_TX_BUSY;
	}

	if (tso) {
		/* If tso is needed, set up context desc */
		ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);

		ctx_desc->tso.qw1.cmd_dtype =
			cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
				    IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
		ctx_desc->tso.qw0.flex_tlen =
			cpu_to_le32(tx_params.offload.tso_len &
				    IDPF_TXD_FLEX_CTX_TLEN_M);
		ctx_desc->tso.qw0.mss_rt =
			cpu_to_le16(tx_params.offload.mss &
				    IDPF_TXD_FLEX_CTX_MSS_RT_M);
		ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;

		u64_stats_update_begin(&tx_q->stats_sync);
		u64_stats_inc(&tx_q->q_stats.lso_pkts);
		u64_stats_update_end(&tx_q->stats_sync);
	}

	idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload);
	if (idx != -1) {
		ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
		idpf_tx_set_tstamp_desc(ctx_desc, idx);
	}

	if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
		struct idpf_sw_queue *refillq = tx_q->refillq;

		/* Save refillq state in case of a packet rollback. Otherwise,
		 * the tags will be leaked since they will be popped from the
		 * refillq but never reposted during cleaning.
		 */
		tx_params.prev_refill_gen =
			idpf_queue_has(RFL_GEN_CHK, refillq);
		tx_params.prev_refill_ntc = refillq->next_to_clean;

		if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
						      &buf_id))) {
			if (tx_params.prev_refill_gen !=
			    idpf_queue_has(RFL_GEN_CHK, refillq))
				idpf_queue_change(RFL_GEN_CHK, refillq);
			refillq->next_to_clean = tx_params.prev_refill_ntc;

			tx_q->next_to_use = tx_params.prev_ntu;
			return idpf_tx_drop_skb(tx_q, skb);
		}
		tx_params.compl_tag = buf_id;

		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
		/* Set the RE bit to periodically "clean" the descriptor ring.
		 * MIN_GAP is set to MIN_RING size to ensure it will be set at
		 * least once each time around the ring.
		 */
		if (idpf_tx_splitq_need_re(tx_q)) {
			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
			tx_q->txq_grp->num_completions_pending++;
			tx_q->last_re = tx_q->next_to_use;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;

	} else {
		buf_id = tx_q->next_to_use;

		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
		tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
	}

	first = &tx_q->tx_buf[buf_id];
	first->skb = skb;

	if (tso) {
		first->packets = tx_params.offload.tso_segs;
		first->bytes = skb->len +
			       ((first->packets - 1) * tx_params.offload.tso_hdr_len);
	} else {
		first->packets = 1;
		first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
	}

	idpf_tx_splitq_map(tx_q, &tx_params, first);

	return NETDEV_TX_OK;
}

/**
 * idpf_tx_start - Selects the right Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Return: NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
	const struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
	struct idpf_tx_queue *tx_q;

	if (unlikely(skb_get_queue_mapping(skb) >=
		     vport->num_txq - vport->num_xdp_txq)) {
		dev_kfree_skb_any(skb);

		return NETDEV_TX_OK;
	}

	tx_q = vport->txqs[skb_get_queue_mapping(skb)];

	/* hardware can't handle really short frames; hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);

		return NETDEV_TX_OK;
	}

	if (idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
		return idpf_tx_splitq_frame(skb, tx_q);
	else
		return idpf_tx_singleq_frame(skb, tx_q);
}

/**
 * idpf_rx_hash - set the hash value in the skb
 * @rxq: Rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being populated
 * @rx_desc: Receive descriptor
 * @decoded: Decoded Rx packet type related fields
 */
static void
idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
	     const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
	     struct libeth_rx_pt decoded)
{
	u32 hash;

	if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, decoded))
		return;

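	/* The 32-bit hash arrives scattered over three descriptor fields:
	 * bits 0-15 in hash1, bits 16-23 in hash2 and bits 24-31 in hash3;
	 * reassemble it below.
	 */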
	hash = le16_to_cpu(rx_desc->hash1) |
	       (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
	       (rx_desc->hash3 << 24);

	libeth_rx_pt_set_hash(skb, hash, decoded);
}

/**
 * idpf_rx_csum - Indicate in skb if checksum is good
 * @rxq: Rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being populated
 * @csum_bits: checksum fields extracted from the descriptor
 * @decoded: Decoded Rx packet type related fields
 *
 * skb->protocol must be set before this function is called
 */
static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
			 struct libeth_rx_csum csum_bits,
			 struct libeth_rx_pt decoded)
{
	bool ipv4, ipv6;

	/* check if Rx checksum is enabled */
	if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
		return;

	/* check if HW has decoded the packet and checksum */
	if (unlikely(!csum_bits.l3l4p))
		return;

	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;

	if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
		goto checksum_fail;

	if (unlikely(ipv6 && csum_bits.ipv6exadd))
		return;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed
	 */
	if (unlikely(csum_bits.l4e))
		goto checksum_fail;

	if (!csum_bits.raw_csum_valid ||
	    decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

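	/* HW reports a raw ones'-complement checksum of the packet;
	 * judging by the swab16() below it lands in the descriptor
	 * byte-swapped, so swap and invert it into a value the stack can
	 * consume as CHECKSUM_COMPLETE.
	 */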
	skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;

	return;

checksum_fail:
	u64_stats_update_begin(&rxq->stats_sync);
	u64_stats_inc(&rxq->q_stats.hw_csum_err);
	u64_stats_update_end(&rxq->stats_sync);
}

/**
 * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
 * @rx_desc: receive descriptor
 *
 * Return: parsed checksum status.
 */
static struct libeth_rx_csum
idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
	struct libeth_rx_csum csum = { };
	u8 qword0, qword1;

	qword0 = rx_desc->status_err0_qw0;
	qword1 = rx_desc->status_err0_qw1;

	csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
			     qword1);
	csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
			      qword1);
	csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
			     qword1);
	csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
			       qword1);
	csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
				   qword0);
	csum.raw_csum_valid =
		!le16_get_bits(rx_desc->ptype_err_fflags0,
			       VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
	csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);

	return csum;
}

/**
 * idpf_rx_rsc - Set the RSC fields in the skb
 * @rxq: Rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being populated
 * @rx_desc: Receive descriptor
 * @decoded: Decoded Rx packet type related fields
 *
 * Populate the skb fields with the total number of RSC segments, RSC payload
 * length and packet type.
 *
 * Return: 0 on success and error code on failure
 */
static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
		       const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
		       struct libeth_rx_pt decoded)
{
	u16 rsc_segments, rsc_seg_len;
	bool ipv4, ipv6;
	int len;

	if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
		     LIBETH_RX_PT_OUTER_L2))
		return -EINVAL;

	rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
	if (unlikely(!rsc_seg_len))
		return -EINVAL;

	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;

	if (unlikely(!(ipv4 ^ ipv6)))
		return -EINVAL;

	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);

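	/* Example (segment size illustrative only): a 26000-byte RSC
	 * payload with rsc_seg_len = 1448 gives
	 * DIV_ROUND_UP(26000, 1448) = 18 segments, priming GRO below to
	 * treat the skb as an 18-segment coalesced flow.
	 */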
	NAPI_GRO_CB(skb)->count = rsc_segments;
	skb_shinfo(skb)->gso_size = rsc_seg_len;

	skb_reset_network_header(skb);

	if (ipv4) {
		struct iphdr *ipv4h = ip_hdr(skb);

		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

		/* Reset and set transport header offset in skb */
		skb_set_transport_header(skb, sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);

		/* Compute the TCP pseudo header checksum */
		tcp_hdr(skb)->check =
			~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		tcp_hdr(skb)->check =
			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
	}

	tcp_gro_complete(skb);

	u64_stats_update_begin(&rxq->stats_sync);
	u64_stats_inc(&rxq->q_stats.rsc_pkts);
	u64_stats_update_end(&rxq->stats_sync);

	return 0;
}

/**
 * idpf_rx_hwtstamp - check for an Rx timestamp and pass it up the stack
 * @rxq: pointer to the rx queue that receives the timestamp
 * @rx_desc: pointer to rx descriptor containing timestamp
 * @skb: skb to put timestamp in
 */
static void
idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
		 struct sk_buff *skb)
{
	u64 cached_time, ts_ns;
	u32 ts_high;

	if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
		return;

	cached_time = READ_ONCE(rxq->cached_phc_time);

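	/* Only the high 32 bits of the timestamp come from the descriptor;
	 * extending them with the periodically cached PHC time rebuilds
	 * the full 64-bit nanosecond value without touching the (slow)
	 * PHC in the Rx hotpath.
	 */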
	ts_high = le32_to_cpu(rx_desc->ts_high);
	ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);

	*skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
		.hwtstamp = ns_to_ktime(ts_ns),
	};
}

/**
 * __idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rxq: Rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being populated
 * @rx_desc: Receive descriptor
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, protocol, and
 * other fields within the skb.
 *
 * Return: 0 on success and error code on failure
 */
static int
__idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
			     const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
	struct libeth_rx_csum csum_bits;
	struct libeth_rx_pt decoded;
	u16 rx_ptype;

	rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
				 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
	decoded = rxq->rx_ptype_lkup[rx_ptype];

	/* process RSS/hash */
	idpf_rx_hash(rxq, skb, rx_desc, decoded);

	if (idpf_queue_has(PTP, rxq))
		idpf_rx_hwtstamp(rxq, rx_desc, skb);

	if (le16_get_bits(rx_desc->hdrlen_flags,
			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
		return idpf_rx_rsc(rxq, skb, rx_desc, decoded);

	csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
	idpf_rx_csum(rxq, skb, csum_bits, decoded);

	return 0;
}

/**
 * idpf_rx_process_skb_fields - populate skb fields for the XDP/libeth path
 * @skb: pointer to the skb being populated
 * @xdp: XDP buffer the skb was built around, wrapping the Rx descriptor
 * @rs: onstack NAPI stats (unused here, part of the callback signature)
 *
 * Return: %true on success, %false if the skb should be dropped.
 */
bool idpf_rx_process_skb_fields(struct sk_buff *skb,
				const struct libeth_xdp_buff *xdp,
				struct libeth_rq_napi_stats *rs)
{
	struct idpf_rx_queue *rxq;

	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);

	return !__idpf_rx_process_skb_fields(rxq, skb, xdp->desc);
}

LIBETH_XDP_DEFINE_START();
LIBETH_XDP_DEFINE_RUN(static idpf_xdp_run_pass, idpf_xdp_run_prog,
		      idpf_xdp_tx_flush_bulk, idpf_rx_process_skb_fields);
LIBETH_XDP_DEFINE_FINALIZE(static idpf_xdp_finalize_rx, idpf_xdp_tx_flush_bulk,
			   idpf_xdp_tx_finalize);
LIBETH_XDP_DEFINE_END();

/**
 * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
 * @hdr: Rx buffer for the headers
 * @buf: Rx buffer for the payload
 * @data_len: number of bytes received to the payload buffer
 *
 * When a header buffer overflow occurs or the HW was unable to parse the
 * packet type to perform header split, the whole frame gets placed to the
 * payload buffer. We can't build a valid skb around a payload buffer when
 * the header split is active since it doesn't reserve any head- or tailroom.
 * In that case, copy either the whole frame when it's short or just the
 * Ethernet header to the header buffer to be able to build an skb and adjust
 * the data offset in the payload buffer, IOW emulate the header split.
 *
 * Return: number of bytes copied to the header buffer.
 */
static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
			     struct libeth_fqe *buf, u32 data_len)
{
	u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
	struct page *hdr_page, *buf_page;
	const void *src;
	void *dst;

	if (unlikely(netmem_is_net_iov(buf->netmem)) ||
	    !libeth_rx_sync_for_cpu(buf, copy))
		return 0;

	hdr_page = __netmem_to_page(hdr->netmem);
	buf_page = __netmem_to_page(buf->netmem);
	dst = page_address(hdr_page) + hdr->offset +
	      pp_page_to_nmdesc(hdr_page)->pp->p.offset;
	src = page_address(buf_page) + buf->offset +
	      pp_page_to_nmdesc(buf_page)->pp->p.offset;

	memcpy(dst, src, LARGEST_ALIGN(copy));
	buf->offset += copy;

	return copy;
}

/**
 * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
 * status and error fields
 * @stat_err_field: field from descriptor to test bits in
 * @stat_err_bits: value to mask
 *
 * Return: %true if any of given @stat_err_bits are set, %false otherwise.
 */
static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
					const u8 stat_err_bits)
{
	return !!(stat_err_field & stat_err_bits);
}

/**
 * idpf_rx_splitq_is_eop - process handling of EOP buffers
 * @rx_desc: Rx descriptor for current buffer
 *
 * Return: %true if the buffer is an EOP buffer, %false otherwise.
 */
static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
	/* if we are the last buffer then there is nothing else to do */
	return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
						  IDPF_RXD_EOF_SPLITQ));
}
3514 /**
3515 * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3516 * @rxq: Rx descriptor queue to retrieve receive buffer queue
3517 * @budget: Total limit on number of packets to process
3518 *
3519 * This function provides a "bounce buffer" approach to Rx interrupt
3520 * processing. The advantage to this is that on systems that have
3521 * expensive overhead for IOMMU access this provides a means of avoiding
3522 * it by maintaining the mapping of the page to the system.
3523 *
3524 * Return: amount of work completed
3525 */
idpf_rx_splitq_clean(struct idpf_rx_queue * rxq,int budget)3526 static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
3527 {
3528 struct idpf_buf_queue *rx_bufq = NULL;
3529 struct libeth_rq_napi_stats rs = { };
3530 u16 ntc = rxq->next_to_clean;
3531 LIBETH_XDP_ONSTACK_BUFF(xdp);
3532 LIBETH_XDP_ONSTACK_BULK(bq);
3533
3534 libeth_xdp_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
3535 rxq->xdpsqs, rxq->num_xdp_txq);
3536 libeth_xdp_init_buff(xdp, &rxq->xdp, &rxq->xdp_rxq);
3537
3538 /* Process Rx packets bounded by budget */
3539 while (likely(rs.packets < budget)) {
3540 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
3541 struct libeth_fqe *hdr, *rx_buf = NULL;
3542 struct idpf_sw_queue *refillq = NULL;
3543 struct idpf_rxq_set *rxq_set = NULL;
3544 unsigned int pkt_len = 0;
3545 unsigned int hdr_len = 0;
3546 u16 gen_id, buf_id = 0;
3547 int bufq_id;
3548 u8 rxdid;
3549
3550 /* get the Rx desc from Rx queue based on 'next_to_clean' */
3551 rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
3552
3553 /* if the descriptor isn't done, no work yet to do */
3554 gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3555 VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
3556 if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
3557 break;
3558
3559 dma_rmb();
3560
3561 rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
3562 rx_desc->rxdid_ucast);
3563 if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
3564 IDPF_RX_BUMP_NTC(rxq, ntc);
3565 u64_stats_update_begin(&rxq->stats_sync);
3566 u64_stats_inc(&rxq->q_stats.bad_descs);
3567 u64_stats_update_end(&rxq->stats_sync);
3568 continue;
3569 }
3570
3571 pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3572 VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
3573
3574 bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3575 VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
3576
3577 rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
3578 refillq = rxq_set->refillq[bufq_id];
3579
3580 /* retrieve buffer from the rxq */
3581 rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
3582
3583 buf_id = le16_to_cpu(rx_desc->buf_id);
3584
3585 rx_buf = &rx_bufq->buf[buf_id];
3586
3587 if (!rx_bufq->hdr_pp)
3588 goto payload;
3589
3590 #define __HBO_BIT VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
3591 #define __HDR_LEN_MASK VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
3592 if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
3593 			/* If a header buffer overflow occurs, i.e. the header is
3594 * too large to fit in the header split buffer, HW will
3595 * put the entire packet, including headers, in the
3596 * data/payload buffer.
3597 */
3598 hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
3599 __HDR_LEN_MASK);
3600 #undef __HDR_LEN_MASK
3601 #undef __HBO_BIT
3602
3603 hdr = &rx_bufq->hdr_buf[buf_id];
3604
3605 if (unlikely(!hdr_len && !xdp->data)) {
3606 hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
3607 /* If failed, drop both buffers by setting len to 0 */
3608 pkt_len -= hdr_len ? : pkt_len;
3609
3610 u64_stats_update_begin(&rxq->stats_sync);
3611 u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
3612 u64_stats_update_end(&rxq->stats_sync);
3613 }
3614
3615 if (libeth_xdp_process_buff(xdp, hdr, hdr_len))
3616 rs.hsplit++;
3617
3618 hdr->netmem = 0;
3619
3620 payload:
3621 libeth_xdp_process_buff(xdp, rx_buf, pkt_len);
3622 rx_buf->netmem = 0;
3623
3624 idpf_post_buf_refill(refillq, buf_id);
3625 IDPF_RX_BUMP_NTC(rxq, ntc);
3626
3627 		/* skip if it is a non-EOP desc */
3628 if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!xdp->data))
3629 continue;
3630
3631 idpf_xdp_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
3632 }
3633
3634 idpf_xdp_finalize_rx(&bq);
3635
3636 rxq->next_to_clean = ntc;
3637 libeth_xdp_save_buff(&rxq->xdp, xdp);
3638
3639 u64_stats_update_begin(&rxq->stats_sync);
3640 u64_stats_add(&rxq->q_stats.packets, rs.packets);
3641 u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
3642 u64_stats_add(&rxq->q_stats.hsplit_pkts, rs.hsplit);
3643 u64_stats_update_end(&rxq->stats_sync);
3644
3645 return rs.packets;
3646 }
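/*
 * Illustrative sketch of the generation-bit handshake used above; not
 * driver code, and ring[], ring_len, sw_gen and desc_gen() are
 * hypothetical names:
 *
 *	bool sw_gen = true;			// matches HW right after reset
 *	u16 ntc = 0;
 *
 *	while (desc_gen(&ring[ntc]) == sw_gen) {
 *		process(&ring[ntc]);		// descriptor was written back
 *		if (++ntc == ring_len) {
 *			ntc = 0;
 *			sw_gen = !sw_gen;	// HW writes !sw_gen next lap
 *		}
 *	}
 *
 * In idpf_rx_splitq_clean() the software flag is the queue's GEN_CHK bit
 * and the descriptor's generation is the GEN field of pktlen_gen_bufq_id.
 */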
3647
3648 /**
3649 * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3650 * @bufq: Pointer to the buffer queue
3651 * @buf_id: buffer ID
3652 * @buf_desc: Buffer queue descriptor
3653 *
3654 * Return: 0 on success and negative on failure.
3655 */
idpf_rx_update_bufq_desc(struct idpf_buf_queue * bufq,u32 buf_id,struct virtchnl2_splitq_rx_buf_desc * buf_desc)3656 static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
3657 struct virtchnl2_splitq_rx_buf_desc *buf_desc)
3658 {
3659 struct libeth_fq_fp fq = {
3660 .pp = bufq->pp,
3661 .fqes = bufq->buf,
3662 .truesize = bufq->truesize,
3663 .count = bufq->desc_count,
3664 };
3665 dma_addr_t addr;
3666
3667 addr = libeth_rx_alloc(&fq, buf_id);
3668 if (addr == DMA_MAPPING_ERROR)
3669 return -ENOMEM;
3670
3671 buf_desc->pkt_addr = cpu_to_le64(addr);
3672 buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
3673
3674 if (!idpf_queue_has(HSPLIT_EN, bufq))
3675 return 0;
3676
3677 fq.pp = bufq->hdr_pp;
3678 fq.fqes = bufq->hdr_buf;
3679 fq.truesize = bufq->hdr_truesize;
3680
3681 addr = libeth_rx_alloc(&fq, buf_id);
3682 if (addr == DMA_MAPPING_ERROR)
3683 return -ENOMEM;
3684
3685 buf_desc->hdr_addr = cpu_to_le64(addr);
3686
3687 return 0;
3688 }
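/*
 * For reference, a successfully refilled descriptor produced above looks
 * like the sketch below when header split is enabled; payload_dma and
 * header_dma stand in for the two libeth_rx_alloc() results:
 *
 *	buf_desc->pkt_addr = cpu_to_le64(payload_dma);	// from bufq->pp
 *	buf_desc->hdr_addr = cpu_to_le64(header_dma);	// from bufq->hdr_pp
 *	buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
 *
 * The same buf_id indexes both bufq->buf[] and bufq->hdr_buf[], which is
 * how idpf_rx_splitq_clean() later recovers both halves of the packet
 * from the single buffer ID completed by hardware.
 */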
3689
3690 /**
3691 * idpf_rx_clean_refillq - Clean refill queue buffers
3692 * @bufq: buffer queue to post buffers back to
3693 * @refillq: refill queue to clean
3694 *
3695  * Pull buffer IDs off the refill queue and post the refilled buffers
3696  * back to the buffer queue in tail-aligned strides.
3696 */
idpf_rx_clean_refillq(struct idpf_buf_queue * bufq,struct idpf_sw_queue * refillq)3697 static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
3698 struct idpf_sw_queue *refillq)
3699 {
3700 struct virtchnl2_splitq_rx_buf_desc *buf_desc;
3701 u16 bufq_nta = bufq->next_to_alloc;
3702 u16 ntc = refillq->next_to_clean;
3703 int cleaned = 0;
3704
3705 buf_desc = &bufq->split_buf[bufq_nta];
3706
3707 /* make sure we stop at ring wrap in the unlikely case ring is full */
3708 while (likely(cleaned < refillq->desc_count)) {
3709 u32 buf_id, refill_desc = refillq->ring[ntc];
3710 bool failure;
3711
3712 if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
3713 !!(refill_desc & IDPF_RFL_BI_GEN_M))
3714 break;
3715
3716 buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
3717 failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
3718 if (failure)
3719 break;
3720
3721 if (unlikely(++ntc == refillq->desc_count)) {
3722 idpf_queue_change(RFL_GEN_CHK, refillq);
3723 ntc = 0;
3724 }
3725
3726 if (unlikely(++bufq_nta == bufq->desc_count)) {
3727 buf_desc = &bufq->split_buf[0];
3728 bufq_nta = 0;
3729 } else {
3730 buf_desc++;
3731 }
3732
3733 cleaned++;
3734 }
3735
3736 if (!cleaned)
3737 return;
3738
3739 /* We want to limit how many transactions on the bus we trigger with
3740 * tail writes so we only do it in strides. It's also important we
3741 * align the write to a multiple of 8 as required by HW.
3742 */
3743 if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
3744 bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
3745 idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
3746 IDPF_RX_BUF_POST_STRIDE));
3747
3748 /* update next to alloc since we have filled the ring */
3749 refillq->next_to_clean = ntc;
3750 bufq->next_to_alloc = bufq_nta;
3751 }
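/*
 * Worked example of the stride check above, assuming a 512-entry buffer
 * queue with next_to_use == 60 and bufq_nta == 100 after cleaning, and
 * an IDPF_RX_BUF_POST_STRIDE of 32 (illustrative value):
 *
 *	pending = (60 <= 100 ? 0 : 512) + 100 - 60 = 40
 *
 * 40 >= 32, so the tail is written with ALIGN_DOWN(100, 32) == 96; the
 * four descriptors past the aligned boundary are published by a later
 * pass once another full stride has accumulated.
 */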
3752
3753 /**
3754 * idpf_rx_clean_refillq_all - Clean all refill queues
3755 * @bufq: buffer queue with refill queues
3756 * @nid: ID of the closest NUMA node with memory
3757 *
3758  * Iterates through all refill queues assigned to the buffer queue
3759  * attached to this vector and cleans each one, posting refilled
3760  * buffers back to the hardware.
3761 */
idpf_rx_clean_refillq_all(struct idpf_buf_queue * bufq,int nid)3762 static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
3763 {
3764 struct idpf_bufq_set *bufq_set;
3765 int i;
3766
3767 page_pool_nid_changed(bufq->pp, nid);
3768 if (bufq->hdr_pp)
3769 page_pool_nid_changed(bufq->hdr_pp, nid);
3770
3771 bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
3772 for (i = 0; i < bufq_set->num_refillqs; i++)
3773 idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
3774 }
3775
3776 /**
3777 * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
3778 * @irq: interrupt number
3779 * @data: pointer to a q_vector
3780 *
3781 * Return: always IRQ_HANDLED
3782 */
idpf_vport_intr_clean_queues(int __always_unused irq,void * data)3783 static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
3784 void *data)
3785 {
3786 struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
3787
3788 q_vector->total_events++;
3789 napi_schedule_irqoff(&q_vector->napi);
3790
3791 return IRQ_HANDLED;
3792 }
3793
3794 /**
3795  * idpf_vport_intr_napi_del_all - Unregister NAPI for all q_vectors in vport
3796 * @rsrc: pointer to queue and vector resources
3797 */
idpf_vport_intr_napi_del_all(struct idpf_q_vec_rsrc * rsrc)3798 static void idpf_vport_intr_napi_del_all(struct idpf_q_vec_rsrc *rsrc)
3799 {
3800 for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
3801 netif_napi_del(&rsrc->q_vectors[v_idx].napi);
3802 }
3803
3804 /**
3805 * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3806 * @rsrc: pointer to queue and vector resources
3807 */
idpf_vport_intr_napi_dis_all(struct idpf_q_vec_rsrc * rsrc)3808 static void idpf_vport_intr_napi_dis_all(struct idpf_q_vec_rsrc *rsrc)
3809 {
3810 for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
3811 napi_disable(&rsrc->q_vectors[v_idx].napi);
3812 }
3813
3814 /**
3815 * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3816 * @rsrc: pointer to queue and vector resources
3817 *
3818  * Free the memory allocated for interrupt vectors associated with a vport
3819 */
idpf_vport_intr_rel(struct idpf_q_vec_rsrc * rsrc)3820 void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc)
3821 {
3822 for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
3823 struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
3824
3825 kfree(q_vector->xsksq);
3826 q_vector->xsksq = NULL;
3827 kfree(q_vector->complq);
3828 q_vector->complq = NULL;
3829 kfree(q_vector->bufq);
3830 q_vector->bufq = NULL;
3831 kfree(q_vector->tx);
3832 q_vector->tx = NULL;
3833 kfree(q_vector->rx);
3834 q_vector->rx = NULL;
3835 }
3836
3837 kfree(rsrc->q_vectors);
3838 rsrc->q_vectors = NULL;
3839 }
3840
idpf_q_vector_set_napi(struct idpf_q_vector * q_vector,bool link)3841 static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
3842 {
3843 struct napi_struct *napi = link ? &q_vector->napi : NULL;
3844 struct net_device *dev = q_vector->vport->netdev;
3845
3846 for (u32 i = 0; i < q_vector->num_rxq; i++)
3847 netif_queue_set_napi(dev, q_vector->rx[i]->idx,
3848 NETDEV_QUEUE_TYPE_RX, napi);
3849
3850 for (u32 i = 0; i < q_vector->num_txq; i++)
3851 netif_queue_set_napi(dev, q_vector->tx[i]->idx,
3852 NETDEV_QUEUE_TYPE_TX, napi);
3853 }
3854
3855 /**
3856 * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3857 * @vport: main vport structure
3858 * @rsrc: pointer to queue and vector resources
3859 */
idpf_vport_intr_rel_irq(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)3860 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport,
3861 struct idpf_q_vec_rsrc *rsrc)
3862 {
3863 struct idpf_adapter *adapter = vport->adapter;
3864
3865 for (u16 vector = 0; vector < rsrc->num_q_vectors; vector++) {
3866 struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
3867 int irq_num, vidx;
3868
3869 		/* q_vector is the address of an array element and can never
3870 		 * be NULL here, so every requested IRQ is released.
3871 		 */
3872
3873 vidx = rsrc->q_vector_idxs[vector];
3874 irq_num = adapter->msix_entries[vidx].vector;
3875
3876 idpf_q_vector_set_napi(q_vector, false);
3877 kfree(free_irq(irq_num, q_vector));
3878 }
3879 }
3880
3881 /**
3882  * idpf_vport_intr_dis_irq_all - Disable all interrupts
3883 * @rsrc: pointer to queue and vector resources
3884 */
idpf_vport_intr_dis_irq_all(struct idpf_q_vec_rsrc * rsrc)3885 static void idpf_vport_intr_dis_irq_all(struct idpf_q_vec_rsrc *rsrc)
3886 {
3887 struct idpf_q_vector *q_vector = rsrc->q_vectors;
3888
3889 writel(0, rsrc->noirq_dyn_ctl);
3890
3891 for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++)
3892 writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
3893 }
3894
3895 /**
3896  * idpf_vport_intr_buildreg_itr - Build dyn_ctl value to re-enable the interrupt
3897 * @q_vector: pointer to q_vector
3898 *
3899 * Return: value to be written back to HW to enable interrupt generation
3900 */
idpf_vport_intr_buildreg_itr(struct idpf_q_vector * q_vector)3901 static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
3902 {
3903 u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
3904 int type = IDPF_NO_ITR_UPDATE_IDX;
3905 u16 itr = 0;
3906
3907 if (q_vector->wb_on_itr) {
3908 /*
3909 * Trigger a software interrupt when exiting wb_on_itr, to make
3910 * sure we catch any pending write backs that might have been
3911 * missed due to interrupt state transition.
3912 */
3913 itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
3914 q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
3915 type = IDPF_SW_ITR_UPDATE_IDX;
3916 itr = IDPF_ITR_20K;
3917 }
3918
3919 itr &= IDPF_ITR_MASK;
3920 /* Don't clear PBA because that can cause lost interrupts that
3921 * came in while we were cleaning/polling
3922 */
3923 itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
3924 (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
3925
3926 return itr_val;
3927 }
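/*
 * Sketch of the value assembled above, with bit positions taken from the
 * per-device intr_reg fields (layout shown for illustration only):
 *
 *	itr_val = INTENA
 *		| (type << dyn_ctl_itridx_s)
 *		| (itr  << (dyn_ctl_intrvl_s - 1));
 *
 * The interval field counts 2-usec units, hence the "- 1": shifting the
 * usec value one bit less is the same as writing itr / 2 into the field.
 * Outside the wb_on_itr exit path, type stays IDPF_NO_ITR_UPDATE_IDX and
 * itr is 0, so only the enable bit is set and the current ITR is kept.
 */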
3928
3929 /**
3930 * idpf_update_dim_sample - Update dim sample with packets and bytes
3931 * @q_vector: the vector associated with the interrupt
3932 * @dim_sample: dim sample to update
3933 * @dim: dim instance structure
3934 * @packets: total packets
3935 * @bytes: total bytes
3936 *
3937 * Update the dim sample with the packets and bytes which are passed to this
3938  * function. Set the dim state appropriately if the dim settings get stale.
3939 */
idpf_update_dim_sample(struct idpf_q_vector * q_vector,struct dim_sample * dim_sample,struct dim * dim,u64 packets,u64 bytes)3940 static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
3941 struct dim_sample *dim_sample,
3942 struct dim *dim, u64 packets, u64 bytes)
3943 {
3944 dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
3945 dim_sample->comp_ctr = 0;
3946
3947 /* if dim settings get stale, like when not updated for 1 second or
3948 * longer, force it to start again. This addresses the frequent case
3949 * of an idle queue being switched to by the scheduler.
3950 */
3951 	if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= MSEC_PER_SEC)
3952 dim->state = DIM_START_MEASURE;
3953 }
3954
3955 /**
3956 * idpf_net_dim - Update net DIM algorithm
3957 * @q_vector: the vector associated with the interrupt
3958 *
3959 * Create a DIM sample and notify net_dim() so that it can possibly decide
3960 * a new ITR value based on incoming packets, bytes, and interrupts.
3961 *
3962 * This function is a no-op if the queue is not configured to dynamic ITR.
3963 */
idpf_net_dim(struct idpf_q_vector * q_vector)3964 static void idpf_net_dim(struct idpf_q_vector *q_vector)
3965 {
3966 struct dim_sample dim_sample = { };
3967 u64 packets, bytes, pkts, bts;
3968 u32 i;
3969
3970 if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
3971 goto check_rx_itr;
3972
3973 for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
3974 struct idpf_tx_queue *txq = q_vector->tx[i];
3975 unsigned int start;
3976
3977 do {
3978 start = u64_stats_fetch_begin(&txq->stats_sync);
3979 pkts = u64_stats_read(&txq->q_stats.packets);
3980 bts = u64_stats_read(&txq->q_stats.bytes);
3981 } while (u64_stats_fetch_retry(&txq->stats_sync, start));
3982
3983 packets += pkts;
3984 bytes += bts;
3985 }
3986
3987 idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
3988 packets, bytes);
3989 net_dim(&q_vector->tx_dim, &dim_sample);
3990
3991 check_rx_itr:
3992 if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
3993 return;
3994
3995 for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
3996 struct idpf_rx_queue *rxq = q_vector->rx[i];
3997 unsigned int start;
3998
3999 do {
4000 start = u64_stats_fetch_begin(&rxq->stats_sync);
4001 pkts = u64_stats_read(&rxq->q_stats.packets);
4002 bts = u64_stats_read(&rxq->q_stats.bytes);
4003 } while (u64_stats_fetch_retry(&rxq->stats_sync, start));
4004
4005 packets += pkts;
4006 bytes += bts;
4007 }
4008
4009 idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
4010 packets, bytes);
4011 net_dim(&q_vector->rx_dim, &dim_sample);
4012 }
4013
4014 /**
4015 * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
4016 * @q_vector: q_vector for which itr is being updated and interrupt enabled
4017 *
4018 * Update the net_dim() algorithm and re-enable the interrupt associated with
4019 * this vector.
4020 */
idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector * q_vector)4021 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
4022 {
4023 u32 intval;
4024
4025 /* net_dim() updates ITR out-of-band using a work item */
4026 idpf_net_dim(q_vector);
4027
4028 intval = idpf_vport_intr_buildreg_itr(q_vector);
4029 q_vector->wb_on_itr = false;
4030
4031 writel(intval, q_vector->intr_reg.dyn_ctl);
4032 }
4033
4034 /**
4035  * idpf_vport_intr_req_irq - Request MSI-X IRQs from the OS for the vport
4036 * @vport: main vport structure
4037 * @rsrc: pointer to queue and vector resources
4038 *
4039 * Return: 0 on success, negative on failure
4040 */
idpf_vport_intr_req_irq(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)4041 static int idpf_vport_intr_req_irq(struct idpf_vport *vport,
4042 struct idpf_q_vec_rsrc *rsrc)
4043 {
4044 struct idpf_adapter *adapter = vport->adapter;
4045 const char *drv_name, *if_name, *vec_name;
4046 int vector, err, irq_num, vidx;
4047
4048 drv_name = dev_driver_string(&adapter->pdev->dev);
4049 if_name = netdev_name(vport->netdev);
4050
4051 for (vector = 0; vector < rsrc->num_q_vectors; vector++) {
4052 struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
4053 char *name;
4054
4055 vidx = rsrc->q_vector_idxs[vector];
4056 irq_num = adapter->msix_entries[vidx].vector;
4057
4058 if (q_vector->num_rxq && q_vector->num_txq)
4059 vec_name = "TxRx";
4060 else if (q_vector->num_rxq)
4061 vec_name = "Rx";
4062 else if (q_vector->num_txq)
4063 vec_name = "Tx";
4064 else
4065 continue;
4066
4067 name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
4068 vec_name, vector);
4069
4070 err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
4071 name, q_vector);
4072 if (err) {
4073 netdev_err(vport->netdev,
4074 "Request_irq failed, error: %d\n", err);
4075 goto free_q_irqs;
4076 }
4077
4078 idpf_q_vector_set_napi(q_vector, true);
4079 }
4080
4081 return 0;
4082
4083 free_q_irqs:
4084 while (--vector >= 0) {
4085 vidx = rsrc->q_vector_idxs[vector];
4086 irq_num = adapter->msix_entries[vidx].vector;
4087 kfree(free_irq(irq_num, &rsrc->q_vectors[vector]));
4088 }
4089
4090 return err;
4091 }
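/*
 * For reference, the names built above show up in /proc/interrupts as
 * e.g. "idpf-eth0-TxRx-0": driver string, netdev name, vector type and
 * the vector's index within the vport (example name assumes a netdev
 * called eth0). The kasprintf() buffer is owned by the IRQ while it is
 * requested; free_irq() hands it back, which is why teardown uses the
 * kfree(free_irq(...)) idiom here and in idpf_vport_intr_rel_irq().
 */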
4092
4093 /**
4094 * idpf_vport_intr_write_itr - Write ITR value to the ITR register
4095 * @q_vector: q_vector structure
4096 * @itr: Interrupt throttling rate
4097 * @tx: Tx or Rx ITR
4098 */
idpf_vport_intr_write_itr(struct idpf_q_vector * q_vector,u16 itr,bool tx)4099 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
4100 {
4101 struct idpf_intr_reg *intr_reg;
4102
4103 	if (tx && !q_vector->tx)
4104 		return;
4105 	if (!tx && !q_vector->rx)
4106 		return;
4107
4108 intr_reg = &q_vector->intr_reg;
4109 writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
4110 tx ? intr_reg->tx_itr : intr_reg->rx_itr);
4111 }
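/*
 * Worked example for the write above, assuming the usual 2-usec ITR
 * granularity (IDPF_ITR_GRAN_S == 1): a requested ITR of 50 usecs is
 * aligned by ITR_REG_ALIGN() and written as 50 >> 1 == 25 hardware
 * interval units to the vector's Tx or Rx ITR register.
 */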
4112
4113 /**
4114 * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
4115 * @vport: main vport structure
4116 * @rsrc: pointer to queue and vector resources
4117 */
idpf_vport_intr_ena_irq_all(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)4118 static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport,
4119 struct idpf_q_vec_rsrc *rsrc)
4120 {
4121 bool dynamic;
4122 u16 itr;
4123
4124 for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
4125 struct idpf_q_vector *qv = &rsrc->q_vectors[q_idx];
4126
4127 /* Set the initial ITR values */
4128 if (qv->num_txq) {
4129 dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
4130 itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
4131 idpf_vport_intr_write_itr(qv, dynamic ?
4132 itr : qv->tx_itr_value,
4133 true);
4134 }
4135
4136 if (qv->num_rxq) {
4137 dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
4138 itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
4139 idpf_vport_intr_write_itr(qv, dynamic ?
4140 itr : qv->rx_itr_value,
4141 false);
4142 }
4143
4144 if (qv->num_txq || qv->num_rxq)
4145 idpf_vport_intr_update_itr_ena_irq(qv);
4146 }
4147
4148 writel(rsrc->noirq_dyn_ctl_ena, rsrc->noirq_dyn_ctl);
4149 }
4150
4151 /**
4152 * idpf_vport_intr_deinit - Release all vector associations for the vport
4153 * @vport: main vport structure
4154 * @rsrc: pointer to queue and vector resources
4155 */
idpf_vport_intr_deinit(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)4156 void idpf_vport_intr_deinit(struct idpf_vport *vport,
4157 struct idpf_q_vec_rsrc *rsrc)
4158 {
4159 idpf_vport_intr_dis_irq_all(rsrc);
4160 idpf_vport_intr_napi_dis_all(rsrc);
4161 idpf_vport_intr_napi_del_all(rsrc);
4162 idpf_vport_intr_rel_irq(vport, rsrc);
4163 }
4164
4165 /**
4166  * idpf_tx_dim_work - Tx ITR update work, scheduled by net_dim()
4167 * @work: work queue structure
4168 */
idpf_tx_dim_work(struct work_struct * work)4169 static void idpf_tx_dim_work(struct work_struct *work)
4170 {
4171 struct idpf_q_vector *q_vector;
4172 struct idpf_vport *vport;
4173 struct dim *dim;
4174 u16 itr;
4175
4176 dim = container_of(work, struct dim, work);
4177 q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
4178 vport = q_vector->vport;
4179
4180 if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
4181 dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
4182
4183 /* look up the values in our local table */
4184 itr = vport->tx_itr_profile[dim->profile_ix];
4185
4186 idpf_vport_intr_write_itr(q_vector, itr, true);
4187
4188 dim->state = DIM_START_MEASURE;
4189 }
4190
4191 /**
4192  * idpf_rx_dim_work - Rx ITR update work, scheduled by net_dim()
4193 * @work: work queue structure
4194 */
idpf_rx_dim_work(struct work_struct * work)4195 static void idpf_rx_dim_work(struct work_struct *work)
4196 {
4197 struct idpf_q_vector *q_vector;
4198 struct idpf_vport *vport;
4199 struct dim *dim;
4200 u16 itr;
4201
4202 dim = container_of(work, struct dim, work);
4203 q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
4204 vport = q_vector->vport;
4205
4206 if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
4207 dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
4208
4209 /* look up the values in our local table */
4210 itr = vport->rx_itr_profile[dim->profile_ix];
4211
4212 idpf_vport_intr_write_itr(q_vector, itr, false);
4213
4214 dim->state = DIM_START_MEASURE;
4215 }
4216
4217 /**
4218 * idpf_init_dim - Set up dynamic interrupt moderation
4219 * @qv: q_vector structure
4220 */
idpf_init_dim(struct idpf_q_vector * qv)4221 static void idpf_init_dim(struct idpf_q_vector *qv)
4222 {
4223 INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
4224 qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4225 qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
4226
4227 INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
4228 qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4229 qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
4230 }
4231
4232 /**
4233 * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
4234 * @rsrc: pointer to queue and vector resources
4235 */
idpf_vport_intr_napi_ena_all(struct idpf_q_vec_rsrc * rsrc)4236 static void idpf_vport_intr_napi_ena_all(struct idpf_q_vec_rsrc *rsrc)
4237 {
4238 for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
4239 struct idpf_q_vector *q_vector = &rsrc->q_vectors[q_idx];
4240
4241 idpf_init_dim(q_vector);
4242 napi_enable(&q_vector->napi);
4243 }
4244 }
4245
4246 /**
4247  * idpf_tx_splitq_clean_all - Clean completion queues
4248  * @q_vec: queue vector
4249  * @budget: Total clean budget, split across the vector's completion queues
4250  * @cleaned: returns number of packets cleaned
4251 *
4252 * Return: %false if clean is not complete else returns %true
4253 */
idpf_tx_splitq_clean_all(struct idpf_q_vector * q_vec,int budget,int * cleaned)4254 static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
4255 int budget, int *cleaned)
4256 {
4257 u16 num_complq = q_vec->num_complq;
4258 bool clean_complete = true;
4259 int i, budget_per_q;
4260
4261 if (unlikely(!num_complq))
4262 return true;
4263
4264 budget_per_q = DIV_ROUND_UP(budget, num_complq);
4265
4266 for (i = 0; i < num_complq; i++)
4267 clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
4268 budget_per_q, cleaned);
4269
4270 return clean_complete;
4271 }
4272
4273 /**
4274  * idpf_rx_splitq_clean_all - Clean all Rx queues on the vector
4275  * @q_vec: queue vector
4276  * @budget: Total clean budget, split across the vector's Rx queues
4277  * @cleaned: returns number of packets cleaned
4278 *
4279 * Return: %false if clean is not complete else returns %true
4280 */
idpf_rx_splitq_clean_all(struct idpf_q_vector * q_vec,int budget,int * cleaned)4281 static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
4282 int *cleaned)
4283 {
4284 u16 num_rxq = q_vec->num_rxq;
4285 bool clean_complete = true;
4286 int pkts_cleaned = 0;
4287 int i, budget_per_q;
4288 int nid;
4289
4290 /* We attempt to distribute budget to each Rx queue fairly, but don't
4291 * allow the budget to go below 1 because that would exit polling early.
4292 */
4293 budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
4294 for (i = 0; i < num_rxq; i++) {
4295 struct idpf_rx_queue *rxq = q_vec->rx[i];
4296 int pkts_cleaned_per_q;
4297
4298 pkts_cleaned_per_q = idpf_queue_has(XSK, rxq) ?
4299 idpf_xskrq_poll(rxq, budget_per_q) :
4300 idpf_rx_splitq_clean(rxq, budget_per_q);
4301 /* if we clean as many as budgeted, we must not be done */
4302 if (pkts_cleaned_per_q >= budget_per_q)
4303 clean_complete = false;
4304 pkts_cleaned += pkts_cleaned_per_q;
4305 }
4306 *cleaned = pkts_cleaned;
4307
4308 nid = numa_mem_id();
4309
4310 for (i = 0; i < q_vec->num_bufq; i++) {
4311 if (!idpf_queue_has(XSK, q_vec->bufq[i]))
4312 idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
4313 }
4314
4315 return clean_complete;
4316 }
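/*
 * Worked example of the budget split above: with the default NAPI budget
 * of 64 and three Rx queues on the vector, budget_per_q becomes
 * max(64 / 3, 1) == 21. If any queue consumes its full 21 packets,
 * clean_complete drops to false and the caller returns the whole budget
 * to the stack so polling continues.
 */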
4317
4318 /**
4319 * idpf_vport_splitq_napi_poll - NAPI handler
4320  * @napi: NAPI instance embedded in the q_vector
4321 * @budget: budget provided by stack
4322 *
4323 * Return: how many packets were cleaned
4324 */
idpf_vport_splitq_napi_poll(struct napi_struct * napi,int budget)4325 static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
4326 {
4327 struct idpf_q_vector *q_vector =
4328 container_of(napi, struct idpf_q_vector, napi);
4329 bool clean_complete = true;
4330 int work_done = 0;
4331
4332 /* Handle case where we are called by netpoll with a budget of 0 */
4333 if (unlikely(!budget)) {
4334 idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4335
4336 return 0;
4337 }
4338
4339 for (u32 i = 0; i < q_vector->num_xsksq; i++)
4340 clean_complete &= idpf_xsk_xmit(q_vector->xsksq[i]);
4341
4342 clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget,
4343 &work_done);
4344 clean_complete &= idpf_rx_splitq_clean_all(q_vector, budget,
4345 &work_done);
4346
4347 /* If work not completed, return budget and polling will return */
4348 if (!clean_complete) {
4349 idpf_vport_intr_set_wb_on_itr(q_vector);
4350 return budget;
4351 }
4352
4353 work_done = min_t(int, work_done, budget - 1);
4354
4355 /* Exit the polling mode, but don't re-enable interrupts if stack might
4356 * poll us due to busy-polling
4357 */
4358 if (napi_complete_done(napi, work_done))
4359 idpf_vport_intr_update_itr_ena_irq(q_vector);
4360 else
4361 idpf_vport_intr_set_wb_on_itr(q_vector);
4362
4363 return work_done;
4364 }
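/*
 * The poll above follows the standard NAPI contract: return the full
 * budget while work remains (without completing NAPI), otherwise report
 * strictly less than budget. A minimal sketch of that shape, with the
 * idpf specifics stripped out and clean()/rearm_irq() as stand-ins:
 *
 *	static int poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = clean(budget);
 *
 *		if (done >= budget)
 *			return budget;		// stay in polling mode
 *		if (napi_complete_done(napi, done))
 *			rearm_irq(napi);	// hypothetical re-enable
 *		return done;
 *	}
 */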
4365
4366 /**
4367 * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
4368 * @vport: virtual port
4369 * @rsrc: pointer to queue and vector resources
4370 *
4371  * Map each allocated q_vector to the queues it will service.
4372 */
idpf_vport_intr_map_vector_to_qs(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)4373 static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport,
4374 struct idpf_q_vec_rsrc *rsrc)
4375 {
4376 u16 num_txq_grp = rsrc->num_txq_grp - vport->num_xdp_txq;
4377 bool split = idpf_is_queue_model_split(rsrc->rxq_model);
4378 struct idpf_rxq_group *rx_qgrp;
4379 struct idpf_txq_group *tx_qgrp;
4380 u32 q_index;
4381
4382 for (unsigned int i = 0, qv_idx = 0; i < rsrc->num_rxq_grp; i++) {
4383 u16 num_rxq;
4384
4385 if (qv_idx >= rsrc->num_q_vectors)
4386 qv_idx = 0;
4387
4388 rx_qgrp = &rsrc->rxq_grps[i];
4389 if (split)
4390 num_rxq = rx_qgrp->splitq.num_rxq_sets;
4391 else
4392 num_rxq = rx_qgrp->singleq.num_rxq;
4393
4394 for (u32 j = 0; j < num_rxq; j++) {
4395 struct idpf_rx_queue *q;
4396
4397 if (split)
4398 q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
4399 else
4400 q = rx_qgrp->singleq.rxqs[j];
4401 q->q_vector = &rsrc->q_vectors[qv_idx];
4402 q_index = q->q_vector->num_rxq;
4403 q->q_vector->rx[q_index] = q;
4404 q->q_vector->num_rxq++;
4405
4406 if (split)
4407 q->napi = &q->q_vector->napi;
4408 }
4409
4410 if (split) {
4411 for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
4412 struct idpf_buf_queue *bufq;
4413
4414 bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
4415 bufq->q_vector = &rsrc->q_vectors[qv_idx];
4416 q_index = bufq->q_vector->num_bufq;
4417 bufq->q_vector->bufq[q_index] = bufq;
4418 bufq->q_vector->num_bufq++;
4419 }
4420 }
4421
4422 qv_idx++;
4423 }
4424
4425 split = idpf_is_queue_model_split(rsrc->txq_model);
4426
4427 for (unsigned int i = 0, qv_idx = 0; i < num_txq_grp; i++) {
4428 u16 num_txq;
4429
4430 if (qv_idx >= rsrc->num_q_vectors)
4431 qv_idx = 0;
4432
4433 tx_qgrp = &rsrc->txq_grps[i];
4434 num_txq = tx_qgrp->num_txq;
4435
4436 for (u32 j = 0; j < num_txq; j++) {
4437 struct idpf_tx_queue *q;
4438
4439 q = tx_qgrp->txqs[j];
4440 q->q_vector = &rsrc->q_vectors[qv_idx];
4441 q->q_vector->tx[q->q_vector->num_txq++] = q;
4442 }
4443
4444 if (split) {
4445 struct idpf_compl_queue *q = tx_qgrp->complq;
4446
4447 q->q_vector = &rsrc->q_vectors[qv_idx];
4448 q->q_vector->complq[q->q_vector->num_complq++] = q;
4449 }
4450
4451 qv_idx++;
4452 }
4453
4454 for (unsigned int i = 0; i < vport->num_xdp_txq; i++) {
4455 struct idpf_tx_queue *xdpsq;
4456 struct idpf_q_vector *qv;
4457
4458 xdpsq = vport->txqs[rsrc->xdp_txq_offset + i];
4459 if (!idpf_queue_has(XSK, xdpsq))
4460 continue;
4461
4462 qv = idpf_find_rxq_vec(vport, i);
4463 idpf_xsk_init_wakeup(qv);
4464
4465 xdpsq->q_vector = qv;
4466 qv->xsksq[qv->num_xsksq++] = xdpsq;
4467 }
4468 }
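/*
 * Worked example of the round-robin mapping above: with 4 q_vectors and
 * 6 Rx queue groups, groups 0..3 land on vectors 0..3, then qv_idx wraps
 * so groups 4 and 5 share vectors 0 and 1. Tx groups are spread the same
 * way with their own qv_idx counter, so Tx and Rx of equal index usually
 * end up on the same vector, giving the "TxRx" naming seen in
 * idpf_vport_intr_req_irq().
 */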
4469
4470 /**
4471 * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4472 * @vport: virtual port
4473 * @rsrc: pointer to queue and vector resources
4474 *
4475 * Initialize vector indexes with values returned over mailbox.
4476 *
4477 * Return: 0 on success, negative on failure
4478 */
idpf_vport_intr_init_vec_idx(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)4479 static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport,
4480 struct idpf_q_vec_rsrc *rsrc)
4481 {
4482 struct idpf_adapter *adapter = vport->adapter;
4483 struct virtchnl2_alloc_vectors *ac;
4484 u16 *vecids, total_vecs;
4485 int i;
4486
4487 ac = adapter->req_vec_chunks;
4488 if (!ac) {
4489 for (i = 0; i < rsrc->num_q_vectors; i++)
4490 rsrc->q_vectors[i].v_idx = rsrc->q_vector_idxs[i];
4491
4492 rsrc->noirq_v_idx = rsrc->q_vector_idxs[i];
4493
4494 return 0;
4495 }
4496
4497 total_vecs = idpf_get_reserved_vecs(adapter);
4498 vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
4499 if (!vecids)
4500 return -ENOMEM;
4501
4502 idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
4503
4504 for (i = 0; i < rsrc->num_q_vectors; i++)
4505 rsrc->q_vectors[i].v_idx = vecids[rsrc->q_vector_idxs[i]];
4506
4507 rsrc->noirq_v_idx = vecids[rsrc->q_vector_idxs[i]];
4508
4509 kfree(vecids);
4510
4511 return 0;
4512 }
4513
4514 /**
4515  * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
4516 * @vport: virtual port structure
4517 * @rsrc: pointer to queue and vector resources
4518 */
idpf_vport_intr_napi_add_all(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)4519 static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport,
4520 struct idpf_q_vec_rsrc *rsrc)
4521 {
4522 int (*napi_poll)(struct napi_struct *napi, int budget);
4523 int irq_num;
4524 u16 qv_idx;
4525
4526 if (idpf_is_queue_model_split(rsrc->txq_model))
4527 napi_poll = idpf_vport_splitq_napi_poll;
4528 else
4529 napi_poll = idpf_vport_singleq_napi_poll;
4530
4531 for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
4532 struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
4533
4534 qv_idx = rsrc->q_vector_idxs[v_idx];
4535 irq_num = vport->adapter->msix_entries[qv_idx].vector;
4536
4537 netif_napi_add_config(vport->netdev, &q_vector->napi,
4538 napi_poll, v_idx);
4539 netif_napi_set_irq(&q_vector->napi, irq_num);
4540 }
4541 }
4542
4543 /**
4544 * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4545 * @vport: virtual port
4546 * @rsrc: pointer to queue and vector resources
4547 *
4548 * Allocate one q_vector per queue interrupt.
4549 *
4550  * Return: 0 on success, -ENOMEM on allocation failure.
4551 */
idpf_vport_intr_alloc(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)4552 int idpf_vport_intr_alloc(struct idpf_vport *vport,
4553 struct idpf_q_vec_rsrc *rsrc)
4554 {
4555 u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
4556 struct idpf_vport_user_config_data *user_config;
4557 struct idpf_q_vector *q_vector;
4558 struct idpf_q_coalesce *q_coal;
4559 u32 complqs_per_vector;
4560 u16 idx = vport->idx;
4561
4562 user_config = &vport->adapter->vport_config[idx]->user_config;
4563
4564 rsrc->q_vectors = kzalloc_objs(struct idpf_q_vector,
4565 rsrc->num_q_vectors);
4566 if (!rsrc->q_vectors)
4567 return -ENOMEM;
4568
4569 txqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
4570 rsrc->num_q_vectors);
4571 rxqs_per_vector = DIV_ROUND_UP(rsrc->num_rxq_grp,
4572 rsrc->num_q_vectors);
4573 bufqs_per_vector = rsrc->num_bufqs_per_qgrp *
4574 DIV_ROUND_UP(rsrc->num_rxq_grp,
4575 rsrc->num_q_vectors);
4576 complqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
4577 rsrc->num_q_vectors);
4578
4579 for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
4580 q_vector = &rsrc->q_vectors[v_idx];
4581 q_coal = &user_config->q_coalesce[v_idx];
4582 q_vector->vport = vport;
4583
4584 q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
4585 q_vector->tx_intr_mode = q_coal->tx_intr_mode;
4586 q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
4587
4588 q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
4589 q_vector->rx_intr_mode = q_coal->rx_intr_mode;
4590 q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
4591
4592 q_vector->tx = kzalloc_objs(*q_vector->tx, txqs_per_vector);
4593 if (!q_vector->tx)
4594 goto error;
4595
4596 q_vector->rx = kzalloc_objs(*q_vector->rx, rxqs_per_vector);
4597 if (!q_vector->rx)
4598 goto error;
4599
4600 if (!idpf_is_queue_model_split(rsrc->rxq_model))
4601 continue;
4602
4603 q_vector->bufq = kzalloc_objs(*q_vector->bufq, bufqs_per_vector);
4604 if (!q_vector->bufq)
4605 goto error;
4606
4607 q_vector->complq = kzalloc_objs(*q_vector->complq,
4608 complqs_per_vector);
4609 if (!q_vector->complq)
4610 goto error;
4611
4612 if (!rsrc->xdp_txq_offset)
4613 continue;
4614
4615 q_vector->xsksq = kzalloc_objs(*q_vector->xsksq,
4616 rxqs_per_vector);
4617 if (!q_vector->xsksq)
4618 goto error;
4619 }
4620
4621 return 0;
4622
4623 error:
4624 idpf_vport_intr_rel(rsrc);
4625
4626 return -ENOMEM;
4627 }
4628
4629 /**
4630 * idpf_vport_intr_init - Setup all vectors for the given vport
4631 * @vport: virtual port
4632 * @rsrc: pointer to queue and vector resources
4633 *
4634 * Return: 0 on success or negative on failure
4635 */
idpf_vport_intr_init(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)4636 int idpf_vport_intr_init(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
4637 {
4638 int err;
4639
4640 err = idpf_vport_intr_init_vec_idx(vport, rsrc);
4641 if (err)
4642 return err;
4643
4644 idpf_vport_intr_map_vector_to_qs(vport, rsrc);
4645 idpf_vport_intr_napi_add_all(vport, rsrc);
4646
4647 err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport, rsrc);
4648 if (err)
4649 goto unroll_vectors_alloc;
4650
4651 err = idpf_vport_intr_req_irq(vport, rsrc);
4652 if (err)
4653 goto unroll_vectors_alloc;
4654
4655 return 0;
4656
4657 unroll_vectors_alloc:
4658 idpf_vport_intr_napi_del_all(rsrc);
4659
4660 return err;
4661 }
4662
idpf_vport_intr_ena(struct idpf_vport * vport,struct idpf_q_vec_rsrc * rsrc)4663 void idpf_vport_intr_ena(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
4664 {
4665 idpf_vport_intr_napi_ena_all(rsrc);
4666 idpf_vport_intr_ena_irq_all(vport, rsrc);
4667 }
4668
4669 /**
4670 * idpf_config_rss - Send virtchnl messages to configure RSS
4671 * @vport: virtual port
4672 * @rss_data: pointer to RSS key and lut info
4673 *
4674 * Return: 0 on success, negative on failure
4675 */
idpf_config_rss(struct idpf_vport * vport,struct idpf_rss_data * rss_data)4676 int idpf_config_rss(struct idpf_vport *vport, struct idpf_rss_data *rss_data)
4677 {
4678 struct idpf_adapter *adapter = vport->adapter;
4679 u32 vport_id = vport->vport_id;
4680 int err;
4681
4682 err = idpf_send_get_set_rss_key_msg(adapter, rss_data, vport_id, false);
4683 if (err)
4684 return err;
4685
4686 return idpf_send_get_set_rss_lut_msg(adapter, rss_data, vport_id, false);
4687 }
4688
4689 /**
4690 * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4691 * @vport: virtual port structure
4692 * @rss_data: pointer to RSS key and lut info
4693 */
idpf_fill_dflt_rss_lut(struct idpf_vport * vport,struct idpf_rss_data * rss_data)4694 void idpf_fill_dflt_rss_lut(struct idpf_vport *vport,
4695 struct idpf_rss_data *rss_data)
4696 {
4697 u16 num_active_rxq = vport->dflt_qv_rsrc.num_rxq;
4698 int i;
4699
4700 for (i = 0; i < rss_data->rss_lut_size; i++)
4701 rss_data->rss_lut[i] = i % num_active_rxq;
4702 }
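/*
 * Worked example of the default spread above: with 4 active Rx queues
 * and a 16-entry LUT, rss_lut[] becomes
 *
 *	0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3
 *
 * so RSS hash buckets are distributed evenly across the active queues.
 */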
4703
4704 /**
4705 * idpf_init_rss_lut - Allocate and initialize RSS LUT
4706 * @vport: virtual port
4707 * @rss_data: pointer to RSS key and lut info
4708 *
4709 * Return: 0 on success, negative on failure
4710 */
idpf_init_rss_lut(struct idpf_vport * vport,struct idpf_rss_data * rss_data)4711 int idpf_init_rss_lut(struct idpf_vport *vport, struct idpf_rss_data *rss_data)
4712 {
4713 if (!rss_data->rss_lut) {
4714 u32 lut_size;
4715
4716 lut_size = rss_data->rss_lut_size * sizeof(u32);
4717 rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
4718 if (!rss_data->rss_lut)
4719 return -ENOMEM;
4720 }
4721
4722 /* Fill the default RSS lut values */
4723 idpf_fill_dflt_rss_lut(vport, rss_data);
4724
4725 return 0;
4726 }
4727
4728 /**
4729 * idpf_deinit_rss_lut - Release RSS LUT
4730 * @rss_data: pointer to RSS key and lut info
4731 */
idpf_deinit_rss_lut(struct idpf_rss_data * rss_data)4732 void idpf_deinit_rss_lut(struct idpf_rss_data *rss_data)
4733 {
4734 kfree(rss_data->rss_lut);
4735 rss_data->rss_lut = NULL;
4736 }
4737