1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3
4 #include <net/libeth/rx.h>
5 #include <net/libeth/tx.h>
6
7 #include "idpf.h"
8 #include "idpf_ptp.h"
9 #include "idpf_virtchnl.h"
10
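/* Tx buffers belonging to one packet are chained through the libeth SQE
 * private area: each buffer stores the index of the next buffer of the same
 * packet. LIBETH_SQE_CHECK_PRIV() asserts at build time that the private
 * area is large enough to hold a u32 index.
 */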
11 #define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv)
12 LIBETH_SQE_CHECK_PRIV(u32);
13
14 static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
15 unsigned int count);
16
17 /**
18 * idpf_tx_timeout - Respond to a Tx Hang
19 * @netdev: network interface device structure
20 * @txqueue: TX queue
21 */
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
23 {
24 struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
25
26 adapter->tx_timeout_count++;
27
28 netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n",
29 adapter->tx_timeout_count, txqueue);
30 if (!idpf_is_reset_in_prog(adapter)) {
31 set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
32 queue_delayed_work(adapter->vc_event_wq,
33 &adapter->vc_event_task,
34 msecs_to_jiffies(10));
35 }
36 }
37
38 /**
39 * idpf_tx_buf_rel_all - Free any empty Tx buffers
40 * @txq: queue to be cleaned
41 */
static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
43 {
44 struct libeth_sq_napi_stats ss = { };
45 struct libeth_cq_pp cp = {
46 .dev = txq->dev,
47 .ss = &ss,
48 };
49 u32 i;
50
51 /* Buffers already cleared, nothing to do */
52 if (!txq->tx_buf)
53 return;
54
55 /* Free all the Tx buffer sk_buffs */
56 for (i = 0; i < txq->buf_pool_size; i++)
57 libeth_tx_complete(&txq->tx_buf[i], &cp);
58
59 kfree(txq->tx_buf);
60 txq->tx_buf = NULL;
61 }
62
63 /**
64 * idpf_tx_desc_rel - Free Tx resources per queue
65 * @txq: Tx descriptor ring for a specific queue
66 *
67 * Free all transmit software resources
68 */
static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
70 {
71 idpf_tx_buf_rel_all(txq);
72 netdev_tx_reset_subqueue(txq->netdev, txq->idx);
73
74 if (!txq->desc_ring)
75 return;
76
77 if (txq->refillq)
78 kfree(txq->refillq->ring);
79
80 dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
81 txq->desc_ring = NULL;
82 txq->next_to_use = 0;
83 txq->next_to_clean = 0;
84 }
85
86 /**
87 * idpf_compl_desc_rel - Free completion resources per queue
88 * @complq: completion queue
89 *
90 * Free all completion software resources.
91 */
static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
93 {
94 if (!complq->comp)
95 return;
96
97 dma_free_coherent(complq->netdev->dev.parent, complq->size,
98 complq->comp, complq->dma);
99 complq->comp = NULL;
100 complq->next_to_use = 0;
101 complq->next_to_clean = 0;
102 }
103
104 /**
105 * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
106 * @vport: virtual port structure
107 *
108 * Free all transmit software resources
109 */
static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
111 {
112 int i, j;
113
114 if (!vport->txq_grps)
115 return;
116
117 for (i = 0; i < vport->num_txq_grp; i++) {
118 struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
119
120 for (j = 0; j < txq_grp->num_txq; j++)
121 idpf_tx_desc_rel(txq_grp->txqs[j]);
122
123 if (idpf_is_queue_model_split(vport->txq_model))
124 idpf_compl_desc_rel(txq_grp->complq);
125 }
126 }
127
128 /**
129 * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
130 * @tx_q: queue for which the buffers are allocated
131 *
132 * Returns 0 on success, negative on failure
133 */
static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
135 {
/* Allocate bookkeeping buffers only. Buffers to be supplied to HW are
 * allocated by the kernel network stack and received as part of the skb.
 */
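/* With flow scheduling, completions can return buffer IDs out of order, so
 * the buffer pool is sized to the full 16-bit ID space rather than to the
 * descriptor count.
 */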
139 if (idpf_queue_has(FLOW_SCH_EN, tx_q))
140 tx_q->buf_pool_size = U16_MAX;
141 else
142 tx_q->buf_pool_size = tx_q->desc_count;
143 tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf),
144 GFP_KERNEL);
145 if (!tx_q->tx_buf)
146 return -ENOMEM;
147
148 return 0;
149 }
150
151 /**
152 * idpf_tx_desc_alloc - Allocate the Tx descriptors
153 * @vport: vport to allocate resources for
154 * @tx_q: the tx ring to set up
155 *
156 * Returns 0 on success, negative on failure
157 */
static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
			      struct idpf_tx_queue *tx_q)
160 {
161 struct device *dev = tx_q->dev;
162 struct idpf_sw_queue *refillq;
163 int err;
164
165 err = idpf_tx_buf_alloc_all(tx_q);
166 if (err)
167 goto err_alloc;
168
169 tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
170
/* Allocate descriptors and also round up to nearest 4K */
172 tx_q->size = ALIGN(tx_q->size, 4096);
173 tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
174 GFP_KERNEL);
175 if (!tx_q->desc_ring) {
176 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
177 tx_q->size);
178 err = -ENOMEM;
179 goto err_alloc;
180 }
181
182 tx_q->next_to_use = 0;
183 tx_q->next_to_clean = 0;
184 idpf_queue_set(GEN_CHK, tx_q);
185
186 if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
187 return 0;
188
189 refillq = tx_q->refillq;
190 refillq->desc_count = tx_q->buf_pool_size;
191 refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
192 GFP_KERNEL);
193 if (!refillq->ring) {
194 err = -ENOMEM;
195 goto err_alloc;
196 }
197
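/* Pre-fill the refill queue with every buffer ID, tagged with the current
 * SW generation bit, so the whole buffer pool is available to the send path
 * from the start.
 */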
198 for (unsigned int i = 0; i < refillq->desc_count; i++)
199 refillq->ring[i] =
200 FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) |
201 FIELD_PREP(IDPF_RFL_BI_GEN_M,
202 idpf_queue_has(GEN_CHK, refillq));
203
204 /* Go ahead and flip the GEN bit since this counts as filling
205 * up the ring, i.e. we already ring wrapped.
206 */
207 idpf_queue_change(GEN_CHK, refillq);
208
209 tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
210
211 return 0;
212
213 err_alloc:
214 idpf_tx_desc_rel(tx_q);
215
216 return err;
217 }
218
219 /**
220 * idpf_compl_desc_alloc - allocate completion descriptors
221 * @vport: vport to allocate resources for
222 * @complq: completion queue to set up
223 *
224 * Return: 0 on success, -errno on failure.
225 */
static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
				 struct idpf_compl_queue *complq)
228 {
229 complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
230
231 complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
232 complq->size, &complq->dma,
233 GFP_KERNEL);
234 if (!complq->comp)
235 return -ENOMEM;
236
237 complq->next_to_use = 0;
238 complq->next_to_clean = 0;
239 idpf_queue_set(GEN_CHK, complq);
240
241 return 0;
242 }
243
244 /**
245 * idpf_tx_desc_alloc_all - allocate all queues Tx resources
246 * @vport: virtual port private structure
247 *
248 * Returns 0 on success, negative on failure
249 */
static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
251 {
252 int err = 0;
253 int i, j;
254
/* Set up Tx queues. In the single queue model, the Tx queue and the
 * completion queue are the same.
 */
258 for (i = 0; i < vport->num_txq_grp; i++) {
259 for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
260 struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
261
262 err = idpf_tx_desc_alloc(vport, txq);
263 if (err) {
264 pci_err(vport->adapter->pdev,
265 "Allocation for Tx Queue %u failed\n",
266 i);
267 goto err_out;
268 }
269 }
270
271 if (!idpf_is_queue_model_split(vport->txq_model))
272 continue;
273
274 /* Setup completion queues */
275 err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
276 if (err) {
277 pci_err(vport->adapter->pdev,
278 "Allocation for Tx Completion Queue %u failed\n",
279 i);
280 goto err_out;
281 }
282 }
283
284 err_out:
285 if (err)
286 idpf_tx_desc_rel_all(vport);
287
288 return err;
289 }
290
291 /**
292 * idpf_rx_page_rel - Release an rx buffer page
293 * @rx_buf: the buffer to free
294 */
static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
296 {
297 if (unlikely(!rx_buf->netmem))
298 return;
299
300 libeth_rx_recycle_slow(rx_buf->netmem);
301
302 rx_buf->netmem = 0;
303 rx_buf->offset = 0;
304 }
305
306 /**
307 * idpf_rx_hdr_buf_rel_all - Release header buffer memory
308 * @bufq: queue to use
309 */
static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
311 {
312 struct libeth_fq fq = {
313 .fqes = bufq->hdr_buf,
314 .pp = bufq->hdr_pp,
315 };
316
317 for (u32 i = 0; i < bufq->desc_count; i++)
318 idpf_rx_page_rel(&bufq->hdr_buf[i]);
319
320 libeth_rx_fq_destroy(&fq);
321 bufq->hdr_buf = NULL;
322 bufq->hdr_pp = NULL;
323 }
324
325 /**
326 * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
327 * @bufq: queue to be cleaned
328 */
static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
330 {
331 struct libeth_fq fq = {
332 .fqes = bufq->buf,
333 .pp = bufq->pp,
334 };
335
336 /* queue already cleared, nothing to do */
337 if (!bufq->buf)
338 return;
339
340 /* Free all the bufs allocated and given to hw on Rx queue */
341 for (u32 i = 0; i < bufq->desc_count; i++)
342 idpf_rx_page_rel(&bufq->buf[i]);
343
344 if (idpf_queue_has(HSPLIT_EN, bufq))
345 idpf_rx_hdr_buf_rel_all(bufq);
346
347 libeth_rx_fq_destroy(&fq);
348 bufq->buf = NULL;
349 bufq->pp = NULL;
350 }
351
352 /**
353 * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
354 * @rxq: queue to be cleaned
355 */
static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
357 {
358 struct libeth_fq fq = {
359 .fqes = rxq->rx_buf,
360 .pp = rxq->pp,
361 };
362
363 if (!rxq->rx_buf)
364 return;
365
366 for (u32 i = 0; i < rxq->desc_count; i++)
367 idpf_rx_page_rel(&rxq->rx_buf[i]);
368
369 libeth_rx_fq_destroy(&fq);
370 rxq->rx_buf = NULL;
371 rxq->pp = NULL;
372 }
373
374 /**
 * idpf_rx_desc_rel - Free a specific Rx queue's resources
376 * @rxq: queue to clean the resources from
377 * @dev: device to free DMA memory
378 * @model: single or split queue model
379 *
 * Free a specific Rx queue's resources
381 */
static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
			     u32 model)
384 {
385 if (!rxq)
386 return;
387
388 if (rxq->skb) {
389 dev_kfree_skb_any(rxq->skb);
390 rxq->skb = NULL;
391 }
392
393 if (!idpf_is_queue_model_split(model))
394 idpf_rx_buf_rel_all(rxq);
395
396 rxq->next_to_alloc = 0;
397 rxq->next_to_clean = 0;
398 rxq->next_to_use = 0;
399 if (!rxq->desc_ring)
400 return;
401
402 dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
403 rxq->desc_ring = NULL;
404 }
405
406 /**
407 * idpf_rx_desc_rel_bufq - free buffer queue resources
408 * @bufq: buffer queue to clean the resources from
409 * @dev: device to free DMA memory
410 */
static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
				  struct device *dev)
413 {
414 if (!bufq)
415 return;
416
417 idpf_rx_buf_rel_bufq(bufq);
418
419 bufq->next_to_alloc = 0;
420 bufq->next_to_clean = 0;
421 bufq->next_to_use = 0;
422
423 if (!bufq->split_buf)
424 return;
425
426 dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
427 bufq->split_buf = NULL;
428 }
429
430 /**
431 * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
432 * @vport: virtual port structure
433 *
434 * Free all rx queues resources
435 */
static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
437 {
438 struct device *dev = &vport->adapter->pdev->dev;
439 struct idpf_rxq_group *rx_qgrp;
440 u16 num_rxq;
441 int i, j;
442
443 if (!vport->rxq_grps)
444 return;
445
446 for (i = 0; i < vport->num_rxq_grp; i++) {
447 rx_qgrp = &vport->rxq_grps[i];
448
449 if (!idpf_is_queue_model_split(vport->rxq_model)) {
450 for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
451 idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
452 VIRTCHNL2_QUEUE_MODEL_SINGLE);
453 continue;
454 }
455
456 num_rxq = rx_qgrp->splitq.num_rxq_sets;
457 for (j = 0; j < num_rxq; j++)
458 idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
459 dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
460
461 if (!rx_qgrp->splitq.bufq_sets)
462 continue;
463
464 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
465 struct idpf_bufq_set *bufq_set =
466 &rx_qgrp->splitq.bufq_sets[j];
467
468 idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
469 }
470 }
471 }
472
473 /**
474 * idpf_rx_buf_hw_update - Store the new tail and head values
475 * @bufq: queue to bump
476 * @val: new head index
477 */
static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
479 {
480 bufq->next_to_use = val;
481
482 if (unlikely(!bufq->tail))
483 return;
484
485 /* writel has an implicit memory barrier */
486 writel(val, bufq->tail);
487 }
488
489 /**
490 * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
491 * @bufq: ring to use
492 *
493 * Returns 0 on success, negative on failure.
494 */
static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
496 {
497 struct libeth_fq fq = {
498 .count = bufq->desc_count,
499 .type = LIBETH_FQE_HDR,
500 .nid = idpf_q_vector_to_mem(bufq->q_vector),
501 };
502 int ret;
503
504 ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
505 if (ret)
506 return ret;
507
508 bufq->hdr_pp = fq.pp;
509 bufq->hdr_buf = fq.fqes;
510 bufq->hdr_truesize = fq.truesize;
511 bufq->rx_hbuf_size = fq.buf_len;
512
513 return 0;
514 }
515
516 /**
517 * idpf_post_buf_refill - Post buffer id to refill queue
518 * @refillq: refill queue to post to
519 * @buf_id: buffer id to post
520 */
static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
522 {
523 u32 nta = refillq->next_to_use;
524
525 /* store the buffer ID and the SW maintained GEN bit to the refillq */
526 refillq->ring[nta] =
527 FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
528 FIELD_PREP(IDPF_RFL_BI_GEN_M,
529 idpf_queue_has(GEN_CHK, refillq));
530
531 if (unlikely(++nta == refillq->desc_count)) {
532 nta = 0;
533 idpf_queue_change(GEN_CHK, refillq);
534 }
535
536 refillq->next_to_use = nta;
537 }
538
539 /**
540 * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
541 * @bufq: buffer queue to post to
542 * @buf_id: buffer id to post
543 *
544 * Returns false if buffer could not be allocated, true otherwise.
545 */
static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
547 {
548 struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
549 struct libeth_fq_fp fq = {
550 .count = bufq->desc_count,
551 };
552 u16 nta = bufq->next_to_alloc;
553 dma_addr_t addr;
554
555 splitq_rx_desc = &bufq->split_buf[nta];
556
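/* With header split enabled, a separate header buffer is posted alongside
 * the payload buffer.
 */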
557 if (idpf_queue_has(HSPLIT_EN, bufq)) {
558 fq.pp = bufq->hdr_pp;
559 fq.fqes = bufq->hdr_buf;
560 fq.truesize = bufq->hdr_truesize;
561
562 addr = libeth_rx_alloc(&fq, buf_id);
563 if (addr == DMA_MAPPING_ERROR)
564 return false;
565
566 splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
567 }
568
569 fq.pp = bufq->pp;
570 fq.fqes = bufq->buf;
571 fq.truesize = bufq->truesize;
572
573 addr = libeth_rx_alloc(&fq, buf_id);
574 if (addr == DMA_MAPPING_ERROR)
575 return false;
576
577 splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
578 splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
579
580 nta++;
581 if (unlikely(nta == bufq->desc_count))
582 nta = 0;
583 bufq->next_to_alloc = nta;
584
585 return true;
586 }
587
588 /**
589 * idpf_rx_post_init_bufs - Post initial buffers to bufq
590 * @bufq: buffer queue to post working set to
591 * @working_set: number of buffers to put in working set
592 *
593 * Returns true if @working_set bufs were posted successfully, false otherwise.
594 */
static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
				   u16 working_set)
597 {
598 int i;
599
600 for (i = 0; i < working_set; i++) {
601 if (!idpf_rx_post_buf_desc(bufq, i))
602 return false;
603 }
604
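/* Only hand full strides of buffers to HW: round the tail value down to a
 * multiple of IDPF_RX_BUF_STRIDE.
 */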
605 idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
606 IDPF_RX_BUF_STRIDE));
607
608 return true;
609 }
610
611 /**
612 * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
613 * @rxq: queue for which the buffers are allocated
614 *
615 * Return: 0 on success, -ENOMEM on failure.
616 */
static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
618 {
619 if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
620 goto err;
621
622 return 0;
623
624 err:
625 idpf_rx_buf_rel_all(rxq);
626
627 return -ENOMEM;
628 }
629
630 /**
631 * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
632 * @rxq: buffer queue to create page pool for
633 *
634 * Return: 0 on success, -errno on failure.
635 */
static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
637 {
638 struct libeth_fq fq = {
639 .count = rxq->desc_count,
640 .type = LIBETH_FQE_MTU,
641 .nid = idpf_q_vector_to_mem(rxq->q_vector),
642 };
643 int ret;
644
645 ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
646 if (ret)
647 return ret;
648
649 rxq->pp = fq.pp;
650 rxq->rx_buf = fq.fqes;
651 rxq->truesize = fq.truesize;
652 rxq->rx_buf_size = fq.buf_len;
653
654 return idpf_rx_buf_alloc_singleq(rxq);
655 }
656
657 /**
658 * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
659 * @rxbufq: queue for which the buffers are allocated
660 *
661 * Returns 0 on success, negative on failure
662 */
static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
664 {
665 int err = 0;
666
667 if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
668 err = idpf_rx_hdr_buf_alloc_all(rxbufq);
669 if (err)
670 goto rx_buf_alloc_all_out;
671 }
672
673 /* Allocate buffers to be given to HW. */
674 if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
675 err = -ENOMEM;
676
677 rx_buf_alloc_all_out:
678 if (err)
679 idpf_rx_buf_rel_bufq(rxbufq);
680
681 return err;
682 }
683
684 /**
685 * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
686 * @bufq: buffer queue to create page pool for
687 * @type: type of Rx buffers to allocate
688 *
689 * Returns 0 on success, negative on failure
690 */
static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
			     enum libeth_fqe_type type)
693 {
694 struct libeth_fq fq = {
695 .truesize = bufq->truesize,
696 .count = bufq->desc_count,
697 .type = type,
698 .hsplit = idpf_queue_has(HSPLIT_EN, bufq),
699 .nid = idpf_q_vector_to_mem(bufq->q_vector),
700 };
701 int ret;
702
703 ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
704 if (ret)
705 return ret;
706
707 bufq->pp = fq.pp;
708 bufq->buf = fq.fqes;
709 bufq->truesize = fq.truesize;
710 bufq->rx_buf_size = fq.buf_len;
711
712 return idpf_rx_buf_alloc_all(bufq);
713 }
714
715 /**
716 * idpf_rx_bufs_init_all - Initialize all RX bufs
717 * @vport: virtual port struct
718 *
719 * Returns 0 on success, negative on failure
720 */
int idpf_rx_bufs_init_all(struct idpf_vport *vport)
722 {
723 bool split = idpf_is_queue_model_split(vport->rxq_model);
724 int i, j, err;
725
726 for (i = 0; i < vport->num_rxq_grp; i++) {
727 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
728 u32 truesize = 0;
729
730 /* Allocate bufs for the rxq itself in singleq */
731 if (!split) {
732 int num_rxq = rx_qgrp->singleq.num_rxq;
733
734 for (j = 0; j < num_rxq; j++) {
735 struct idpf_rx_queue *q;
736
737 q = rx_qgrp->singleq.rxqs[j];
738 err = idpf_rx_bufs_init_singleq(q);
739 if (err)
740 return err;
741 }
742
743 continue;
744 }
745
746 /* Otherwise, allocate bufs for the buffer queues */
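/* The first buffer queue gets full MTU-sized buffers; each subsequent queue
 * in the group gets short buffers with half the truesize of the previous
 * one.
 */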
747 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
748 enum libeth_fqe_type type;
749 struct idpf_buf_queue *q;
750
751 q = &rx_qgrp->splitq.bufq_sets[j].bufq;
752 q->truesize = truesize;
753
754 type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
755
756 err = idpf_rx_bufs_init(q, type);
757 if (err)
758 return err;
759
760 truesize = q->truesize >> 1;
761 }
762 }
763
764 return 0;
765 }
766
767 /**
768 * idpf_rx_desc_alloc - Allocate queue Rx resources
769 * @vport: vport to allocate resources for
770 * @rxq: Rx queue for which the resources are setup
771 *
772 * Returns 0 on success, negative on failure
773 */
static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
			      struct idpf_rx_queue *rxq)
776 {
777 struct device *dev = &vport->adapter->pdev->dev;
778
779 rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
780
781 /* Allocate descriptors and also round up to nearest 4K */
782 rxq->size = ALIGN(rxq->size, 4096);
783 rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
784 &rxq->dma, GFP_KERNEL);
785 if (!rxq->desc_ring) {
786 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
787 rxq->size);
788 return -ENOMEM;
789 }
790
791 rxq->next_to_alloc = 0;
792 rxq->next_to_clean = 0;
793 rxq->next_to_use = 0;
794 idpf_queue_set(GEN_CHK, rxq);
795
796 return 0;
797 }
798
799 /**
800 * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
801 * @vport: vport to allocate resources for
802 * @bufq: buffer queue for which the resources are set up
803 *
804 * Return: 0 on success, -ENOMEM on failure.
805 */
static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
				struct idpf_buf_queue *bufq)
808 {
809 struct device *dev = &vport->adapter->pdev->dev;
810
811 bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
812
813 bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
814 GFP_KERNEL);
815 if (!bufq->split_buf)
816 return -ENOMEM;
817
818 bufq->next_to_alloc = 0;
819 bufq->next_to_clean = 0;
820 bufq->next_to_use = 0;
821
822 idpf_queue_set(GEN_CHK, bufq);
823
824 return 0;
825 }
826
827 /**
828 * idpf_rx_desc_alloc_all - allocate all RX queues resources
829 * @vport: virtual port structure
830 *
831 * Returns 0 on success, negative on failure
832 */
static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
834 {
835 struct idpf_rxq_group *rx_qgrp;
836 int i, j, err;
837 u16 num_rxq;
838
839 for (i = 0; i < vport->num_rxq_grp; i++) {
840 rx_qgrp = &vport->rxq_grps[i];
841 if (idpf_is_queue_model_split(vport->rxq_model))
842 num_rxq = rx_qgrp->splitq.num_rxq_sets;
843 else
844 num_rxq = rx_qgrp->singleq.num_rxq;
845
846 for (j = 0; j < num_rxq; j++) {
847 struct idpf_rx_queue *q;
848
849 if (idpf_is_queue_model_split(vport->rxq_model))
850 q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
851 else
852 q = rx_qgrp->singleq.rxqs[j];
853
854 err = idpf_rx_desc_alloc(vport, q);
855 if (err) {
856 pci_err(vport->adapter->pdev,
857 "Memory allocation for Rx Queue %u failed\n",
858 i);
859 goto err_out;
860 }
861 }
862
863 if (!idpf_is_queue_model_split(vport->rxq_model))
864 continue;
865
866 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
867 struct idpf_buf_queue *q;
868
869 q = &rx_qgrp->splitq.bufq_sets[j].bufq;
870
871 err = idpf_bufq_desc_alloc(vport, q);
872 if (err) {
873 pci_err(vport->adapter->pdev,
874 "Memory allocation for Rx Buffer Queue %u failed\n",
875 i);
876 goto err_out;
877 }
878 }
879 }
880
881 return 0;
882
883 err_out:
884 idpf_rx_desc_rel_all(vport);
885
886 return err;
887 }
888
889 /**
890 * idpf_txq_group_rel - Release all resources for txq groups
891 * @vport: vport to release txq groups on
892 */
static void idpf_txq_group_rel(struct idpf_vport *vport)
894 {
895 bool split, flow_sch_en;
896 int i, j;
897
898 if (!vport->txq_grps)
899 return;
900
901 split = idpf_is_queue_model_split(vport->txq_model);
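/* Flow-based scheduling is used unless the device advertises splitq
 * queue-based scheduling.
 */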
902 flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
903 VIRTCHNL2_CAP_SPLITQ_QSCHED);
904
905 for (i = 0; i < vport->num_txq_grp; i++) {
906 struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
907
908 for (j = 0; j < txq_grp->num_txq; j++) {
909 if (flow_sch_en) {
910 kfree(txq_grp->txqs[j]->refillq);
911 txq_grp->txqs[j]->refillq = NULL;
912 }
913
914 kfree(txq_grp->txqs[j]);
915 txq_grp->txqs[j] = NULL;
916 }
917
918 if (!split)
919 continue;
920
921 kfree(txq_grp->complq);
922 txq_grp->complq = NULL;
923 }
924 kfree(vport->txq_grps);
925 vport->txq_grps = NULL;
926 }
927
928 /**
929 * idpf_rxq_sw_queue_rel - Release software queue resources
930 * @rx_qgrp: rx queue group with software queues
931 */
static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
933 {
934 int i, j;
935
936 for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
937 struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
938
939 for (j = 0; j < bufq_set->num_refillqs; j++) {
940 kfree(bufq_set->refillqs[j].ring);
941 bufq_set->refillqs[j].ring = NULL;
942 }
943 kfree(bufq_set->refillqs);
944 bufq_set->refillqs = NULL;
945 }
946 }
947
948 /**
949 * idpf_rxq_group_rel - Release all resources for rxq groups
950 * @vport: vport to release rxq groups on
951 */
static void idpf_rxq_group_rel(struct idpf_vport *vport)
953 {
954 int i;
955
956 if (!vport->rxq_grps)
957 return;
958
959 for (i = 0; i < vport->num_rxq_grp; i++) {
960 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
961 u16 num_rxq;
962 int j;
963
964 if (idpf_is_queue_model_split(vport->rxq_model)) {
965 num_rxq = rx_qgrp->splitq.num_rxq_sets;
966 for (j = 0; j < num_rxq; j++) {
967 kfree(rx_qgrp->splitq.rxq_sets[j]);
968 rx_qgrp->splitq.rxq_sets[j] = NULL;
969 }
970
971 idpf_rxq_sw_queue_rel(rx_qgrp);
972 kfree(rx_qgrp->splitq.bufq_sets);
973 rx_qgrp->splitq.bufq_sets = NULL;
974 } else {
975 num_rxq = rx_qgrp->singleq.num_rxq;
976 for (j = 0; j < num_rxq; j++) {
977 kfree(rx_qgrp->singleq.rxqs[j]);
978 rx_qgrp->singleq.rxqs[j] = NULL;
979 }
980 }
981 }
982 kfree(vport->rxq_grps);
983 vport->rxq_grps = NULL;
984 }
985
986 /**
987 * idpf_vport_queue_grp_rel_all - Release all queue groups
988 * @vport: vport to release queue groups for
989 */
static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
991 {
992 idpf_txq_group_rel(vport);
993 idpf_rxq_group_rel(vport);
994 }
995
996 /**
997 * idpf_vport_queues_rel - Free memory for all queues
998 * @vport: virtual port
999 *
1000 * Free the memory allocated for queues associated to a vport
1001 */
void idpf_vport_queues_rel(struct idpf_vport *vport)
1003 {
1004 idpf_tx_desc_rel_all(vport);
1005 idpf_rx_desc_rel_all(vport);
1006 idpf_vport_queue_grp_rel_all(vport);
1007
1008 kfree(vport->txqs);
1009 vport->txqs = NULL;
1010 }
1011
1012 /**
1013 * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
1014 * @vport: vport to init txqs on
1015 *
1016 * We get a queue index from skb->queue_mapping and we need a fast way to
1017 * dereference the queue from queue groups. This allows us to quickly pull a
1018 * txq based on a queue index.
1019 *
1020 * Returns 0 on success, negative on failure
1021 */
static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
1023 {
1024 struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
1025 struct work_struct *tstamp_task = &vport->tstamp_task;
1026 int i, j, k = 0;
1027
1028 vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
1029 GFP_KERNEL);
1030
1031 if (!vport->txqs)
1032 return -ENOMEM;
1033
1034 for (i = 0; i < vport->num_txq_grp; i++) {
1035 struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
1036
1037 for (j = 0; j < tx_grp->num_txq; j++, k++) {
1038 vport->txqs[k] = tx_grp->txqs[j];
1039 vport->txqs[k]->idx = k;
1040
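/* Cache the PTP Tx timestamp capabilities and the timestamp worker on each
 * queue when the vport supports Tx timestamping.
 */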
1041 if (!caps)
1042 continue;
1043
1044 vport->txqs[k]->cached_tstamp_caps = caps;
1045 vport->txqs[k]->tstamp_task = tstamp_task;
1046 }
1047 }
1048
1049 return 0;
1050 }
1051
1052 /**
1053 * idpf_vport_init_num_qs - Initialize number of queues
1054 * @vport: vport to initialize queues
1055 * @vport_msg: data to be filled into vport
1056 */
void idpf_vport_init_num_qs(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_msg)
1059 {
1060 struct idpf_vport_user_config_data *config_data;
1061 u16 idx = vport->idx;
1062
1063 config_data = &vport->adapter->vport_config[idx]->user_config;
1064 vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
1065 vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
/* The numbers of txqs and rxqs in the config data will be zero only in the
 * driver load path, and we don't update them thereafter.
 */
1069 if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
1070 config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
1071 config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
1072 }
1073
1074 if (idpf_is_queue_model_split(vport->txq_model))
1075 vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
1076 if (idpf_is_queue_model_split(vport->rxq_model))
1077 vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
1078
1079 /* Adjust number of buffer queues per Rx queue group. */
1080 if (!idpf_is_queue_model_split(vport->rxq_model)) {
1081 vport->num_bufqs_per_qgrp = 0;
1082
1083 return;
1084 }
1085
1086 vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
1087 }
1088
1089 /**
 * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
 * @vport: vport to calculate queue descriptors for
1092 */
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
1094 {
1095 struct idpf_vport_user_config_data *config_data;
1096 int num_bufqs = vport->num_bufqs_per_qgrp;
1097 u32 num_req_txq_desc, num_req_rxq_desc;
1098 u16 idx = vport->idx;
1099 int i;
1100
1101 config_data = &vport->adapter->vport_config[idx]->user_config;
1102 num_req_txq_desc = config_data->num_req_txq_desc;
1103 num_req_rxq_desc = config_data->num_req_rxq_desc;
1104
1105 vport->complq_desc_count = 0;
1106 if (num_req_txq_desc) {
1107 vport->txq_desc_count = num_req_txq_desc;
1108 if (idpf_is_queue_model_split(vport->txq_model)) {
1109 vport->complq_desc_count = num_req_txq_desc;
1110 if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
1111 vport->complq_desc_count =
1112 IDPF_MIN_TXQ_COMPLQ_DESC;
1113 }
1114 } else {
1115 vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
1116 if (idpf_is_queue_model_split(vport->txq_model))
1117 vport->complq_desc_count =
1118 IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
1119 }
1120
1121 if (num_req_rxq_desc)
1122 vport->rxq_desc_count = num_req_rxq_desc;
1123 else
1124 vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
1125
1126 for (i = 0; i < num_bufqs; i++) {
1127 if (!vport->bufq_desc_count[i])
1128 vport->bufq_desc_count[i] =
1129 IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
1130 num_bufqs);
1131 }
1132 }
1133
1134 /**
1135 * idpf_vport_calc_total_qs - Calculate total number of queues
1136 * @adapter: private data struct
1137 * @vport_idx: vport idx to retrieve vport pointer
1138 * @vport_msg: message to fill with data
1139 * @max_q: vport max queue info
1140 *
1141 * Return 0 on success, error value on failure.
1142 */
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
			     struct virtchnl2_create_vport *vport_msg,
			     struct idpf_vport_max_q *max_q)
1146 {
1147 int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
1148 int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
1149 u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
1150 struct idpf_vport_config *vport_config;
1151 u16 num_txq_grps, num_rxq_grps;
1152 u32 num_qs;
1153
1154 vport_config = adapter->vport_config[vport_idx];
1155 if (vport_config) {
1156 num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
1157 num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
1158 } else {
1159 int num_cpus;
1160
1161 /* Restrict num of queues to cpus online as a default
1162 * configuration to give best performance. User can always
1163 * override to a max number of queues via ethtool.
1164 */
1165 num_cpus = num_online_cpus();
1166
1167 dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
1168 dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
1169 dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
1170 dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
1171 }
1172
1173 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
1174 num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
1175 vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
1176 IDPF_COMPLQ_PER_GROUP);
1177 vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
1178 IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
1179 } else {
1180 num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1181 num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
1182 dflt_singleq_txqs);
1183 vport_msg->num_tx_q = cpu_to_le16(num_qs);
1184 vport_msg->num_tx_complq = 0;
1185 }
1186 if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
1187 num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
1188 vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
1189 IDPF_MAX_BUFQS_PER_RXQ_GRP);
1190 vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
1191 IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
1192 } else {
1193 num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1194 num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
1195 dflt_singleq_rxqs);
1196 vport_msg->num_rx_q = cpu_to_le16(num_qs);
1197 vport_msg->num_rx_bufq = 0;
1198 }
1199
1200 return 0;
1201 }
1202
1203 /**
1204 * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1205 * @vport: vport to calculate q groups for
1206 */
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
1208 {
1209 if (idpf_is_queue_model_split(vport->txq_model))
1210 vport->num_txq_grp = vport->num_txq;
1211 else
1212 vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1213
1214 if (idpf_is_queue_model_split(vport->rxq_model))
1215 vport->num_rxq_grp = vport->num_rxq;
1216 else
1217 vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1218 }
1219
1220 /**
1221 * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1222 * @vport: vport to calculate queues for
1223 * @num_txq: return parameter for number of TX queues
1224 * @num_rxq: return parameter for number of RX queues
1225 */
static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
					 u16 *num_txq, u16 *num_rxq)
1228 {
1229 if (idpf_is_queue_model_split(vport->txq_model))
1230 *num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1231 else
1232 *num_txq = vport->num_txq;
1233
1234 if (idpf_is_queue_model_split(vport->rxq_model))
1235 *num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1236 else
1237 *num_rxq = vport->num_rxq;
1238 }
1239
1240 /**
1241 * idpf_rxq_set_descids - set the descids supported by this queue
1242 * @vport: virtual port data structure
1243 * @q: rx queue for which descids are set
1244 *
1245 */
static void idpf_rxq_set_descids(const struct idpf_vport *vport,
				 struct idpf_rx_queue *q)
1248 {
1249 if (idpf_is_queue_model_split(vport->rxq_model)) {
1250 q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1251 } else {
1252 if (vport->base_rxd)
1253 q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
1254 else
1255 q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
1256 }
1257 }
1258
1259 /**
1260 * idpf_txq_group_alloc - Allocate all txq group resources
1261 * @vport: vport to allocate txq groups for
1262 * @num_txq: number of txqs to allocate for each group
1263 *
1264 * Returns 0 on success, negative on failure
1265 */
static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
1267 {
1268 bool split, flow_sch_en;
1269 int i;
1270
1271 vport->txq_grps = kcalloc(vport->num_txq_grp,
1272 sizeof(*vport->txq_grps), GFP_KERNEL);
1273 if (!vport->txq_grps)
1274 return -ENOMEM;
1275
1276 split = idpf_is_queue_model_split(vport->txq_model);
1277 flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
1278 VIRTCHNL2_CAP_SPLITQ_QSCHED);
1279
1280 for (i = 0; i < vport->num_txq_grp; i++) {
1281 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1282 struct idpf_adapter *adapter = vport->adapter;
1283 int j;
1284
1285 tx_qgrp->vport = vport;
1286 tx_qgrp->num_txq = num_txq;
1287
1288 for (j = 0; j < tx_qgrp->num_txq; j++) {
1289 tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
1290 GFP_KERNEL);
1291 if (!tx_qgrp->txqs[j])
1292 goto err_alloc;
1293 }
1294
1295 for (j = 0; j < tx_qgrp->num_txq; j++) {
1296 struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1297
1298 q->dev = &adapter->pdev->dev;
1299 q->desc_count = vport->txq_desc_count;
1300 q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
1301 q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
1302 q->netdev = vport->netdev;
1303 q->txq_grp = tx_qgrp;
1304
1305 if (!split) {
1306 q->clean_budget = vport->compln_clean_budget;
1307 idpf_queue_assign(CRC_EN, q,
1308 vport->crc_enable);
1309 }
1310
1311 if (!flow_sch_en)
1312 continue;
1313
1314 idpf_queue_set(FLOW_SCH_EN, q);
1315
1316 q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL);
1317 if (!q->refillq)
1318 goto err_alloc;
1319
1320 idpf_queue_set(GEN_CHK, q->refillq);
1321 idpf_queue_set(RFL_GEN_CHK, q->refillq);
1322 }
1323
1324 if (!split)
1325 continue;
1326
1327 tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
1328 sizeof(*tx_qgrp->complq),
1329 GFP_KERNEL);
1330 if (!tx_qgrp->complq)
1331 goto err_alloc;
1332
1333 tx_qgrp->complq->desc_count = vport->complq_desc_count;
1334 tx_qgrp->complq->txq_grp = tx_qgrp;
1335 tx_qgrp->complq->netdev = vport->netdev;
1336 tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
1337
1338 if (flow_sch_en)
1339 idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
1340 }
1341
1342 return 0;
1343
1344 err_alloc:
1345 idpf_txq_group_rel(vport);
1346
1347 return -ENOMEM;
1348 }
1349
1350 /**
1351 * idpf_rxq_group_alloc - Allocate all rxq group resources
1352 * @vport: vport to allocate rxq groups for
1353 * @num_rxq: number of rxqs to allocate for each group
1354 *
1355 * Returns 0 on success, negative on failure
1356 */
static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
1358 {
1359 int i, k, err = 0;
1360 bool hs;
1361
1362 vport->rxq_grps = kcalloc(vport->num_rxq_grp,
1363 sizeof(struct idpf_rxq_group), GFP_KERNEL);
1364 if (!vport->rxq_grps)
1365 return -ENOMEM;
1366
1367 hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
1368
1369 for (i = 0; i < vport->num_rxq_grp; i++) {
1370 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1371 int j;
1372
1373 rx_qgrp->vport = vport;
1374 if (!idpf_is_queue_model_split(vport->rxq_model)) {
1375 rx_qgrp->singleq.num_rxq = num_rxq;
1376 for (j = 0; j < num_rxq; j++) {
1377 rx_qgrp->singleq.rxqs[j] =
1378 kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
1379 GFP_KERNEL);
1380 if (!rx_qgrp->singleq.rxqs[j]) {
1381 err = -ENOMEM;
1382 goto err_alloc;
1383 }
1384 }
1385 goto skip_splitq_rx_init;
1386 }
1387 rx_qgrp->splitq.num_rxq_sets = num_rxq;
1388
1389 for (j = 0; j < num_rxq; j++) {
1390 rx_qgrp->splitq.rxq_sets[j] =
1391 kzalloc(sizeof(struct idpf_rxq_set),
1392 GFP_KERNEL);
1393 if (!rx_qgrp->splitq.rxq_sets[j]) {
1394 err = -ENOMEM;
1395 goto err_alloc;
1396 }
1397 }
1398
1399 rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
1400 sizeof(struct idpf_bufq_set),
1401 GFP_KERNEL);
1402 if (!rx_qgrp->splitq.bufq_sets) {
1403 err = -ENOMEM;
1404 goto err_alloc;
1405 }
1406
1407 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1408 struct idpf_bufq_set *bufq_set =
1409 &rx_qgrp->splitq.bufq_sets[j];
1410 int swq_size = sizeof(struct idpf_sw_queue);
1411 struct idpf_buf_queue *q;
1412
1413 q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1414 q->desc_count = vport->bufq_desc_count[j];
1415 q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1416
1417 idpf_queue_assign(HSPLIT_EN, q, hs);
1418
1419 bufq_set->num_refillqs = num_rxq;
1420 bufq_set->refillqs = kcalloc(num_rxq, swq_size,
1421 GFP_KERNEL);
1422 if (!bufq_set->refillqs) {
1423 err = -ENOMEM;
1424 goto err_alloc;
1425 }
1426 for (k = 0; k < bufq_set->num_refillqs; k++) {
1427 struct idpf_sw_queue *refillq =
1428 &bufq_set->refillqs[k];
1429
1430 refillq->desc_count =
1431 vport->bufq_desc_count[j];
1432 idpf_queue_set(GEN_CHK, refillq);
1433 idpf_queue_set(RFL_GEN_CHK, refillq);
1434 refillq->ring = kcalloc(refillq->desc_count,
1435 sizeof(*refillq->ring),
1436 GFP_KERNEL);
1437 if (!refillq->ring) {
1438 err = -ENOMEM;
1439 goto err_alloc;
1440 }
1441 }
1442 }
1443
1444 skip_splitq_rx_init:
1445 for (j = 0; j < num_rxq; j++) {
1446 struct idpf_rx_queue *q;
1447
1448 if (!idpf_is_queue_model_split(vport->rxq_model)) {
1449 q = rx_qgrp->singleq.rxqs[j];
1450 goto setup_rxq;
1451 }
1452 q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
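/* Each RxQ returns used buffer IDs through one refill queue per buffer
 * queue in the group.
 */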
1453 rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
1454 &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
1455 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
1456 rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
1457 &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
1458
1459 idpf_queue_assign(HSPLIT_EN, q, hs);
1460
1461 setup_rxq:
1462 q->desc_count = vport->rxq_desc_count;
1463 q->rx_ptype_lkup = vport->rx_ptype_lkup;
1464 q->netdev = vport->netdev;
1465 q->bufq_sets = rx_qgrp->splitq.bufq_sets;
1466 q->idx = (i * num_rxq) + j;
1467 q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1468 q->rx_max_pkt_size = vport->netdev->mtu +
1469 LIBETH_RX_LL_LEN;
1470 idpf_rxq_set_descids(vport, q);
1471 }
1472 }
1473
1474 err_alloc:
1475 if (err)
1476 idpf_rxq_group_rel(vport);
1477
1478 return err;
1479 }
1480
1481 /**
1482 * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1483 * @vport: vport with qgrps to allocate
1484 *
1485 * Returns 0 on success, negative on failure
1486 */
static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
1488 {
1489 u16 num_txq, num_rxq;
1490 int err;
1491
1492 idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
1493
1494 err = idpf_txq_group_alloc(vport, num_txq);
1495 if (err)
1496 goto err_out;
1497
1498 err = idpf_rxq_group_alloc(vport, num_rxq);
1499 if (err)
1500 goto err_out;
1501
1502 return 0;
1503
1504 err_out:
1505 idpf_vport_queue_grp_rel_all(vport);
1506
1507 return err;
1508 }
1509
1510 /**
1511 * idpf_vport_queues_alloc - Allocate memory for all queues
1512 * @vport: virtual port
1513 *
1514 * Allocate memory for queues associated with a vport. Returns 0 on success,
1515 * negative on failure.
1516 */
int idpf_vport_queues_alloc(struct idpf_vport *vport)
1518 {
1519 int err;
1520
1521 err = idpf_vport_queue_grp_alloc_all(vport);
1522 if (err)
1523 goto err_out;
1524
1525 err = idpf_tx_desc_alloc_all(vport);
1526 if (err)
1527 goto err_out;
1528
1529 err = idpf_rx_desc_alloc_all(vport);
1530 if (err)
1531 goto err_out;
1532
1533 err = idpf_vport_init_fast_path_txqs(vport);
1534 if (err)
1535 goto err_out;
1536
1537 return 0;
1538
1539 err_out:
1540 idpf_vport_queues_rel(vport);
1541
1542 return err;
1543 }
1544
1545 /**
1546 * idpf_tx_handle_sw_marker - Handle queue marker packet
1547 * @tx_q: tx queue to handle software marker
1548 */
static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
1550 {
1551 struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
1552 struct idpf_vport *vport = priv->vport;
1553 int i;
1554
1555 idpf_queue_clear(SW_MARKER, tx_q);
1556 /* Hardware must write marker packets to all queues associated with
1557 * completion queues. So check if all queues received marker packets
1558 */
1559 for (i = 0; i < vport->num_txq; i++)
1560 /* If we're still waiting on any other TXQ marker completions,
1561 * just return now since we cannot wake up the marker_wq yet.
1562 */
1563 if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
1564 return;
1565
1566 /* Drain complete */
1567 set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
1568 wake_up(&vport->sw_marker_wq);
1569 }
1570
1571 /**
1572 * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value
1573 * @txq: queue to read the timestamp from
1574 * @skb: socket buffer to provide Tx timestamp value
1575 *
1576 * Schedule a work to read Tx timestamp value generated once the packet is
1577 * transmitted.
1578 */
static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
1580 {
1581 struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
1582 struct idpf_ptp_tx_tstamp_status *tx_tstamp_status;
1583
1584 tx_tstamp_caps = txq->cached_tstamp_caps;
1585 spin_lock_bh(&tx_tstamp_caps->status_lock);
1586
1587 for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) {
1588 tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i];
1589 if (tx_tstamp_status->state != IDPF_PTP_FREE)
1590 continue;
1591
1592 tx_tstamp_status->skb = skb;
1593 tx_tstamp_status->state = IDPF_PTP_REQUEST;
1594
1595 /* Fetch timestamp from completion descriptor through
1596 * virtchnl msg to report to stack.
1597 */
1598 queue_work(system_unbound_wq, txq->tstamp_task);
1599 break;
1600 }
1601
1602 spin_unlock_bh(&tx_tstamp_caps->status_lock);
1603 }
1604
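/* Advance the clean index together with its descriptor and buffer cursors,
 * wrapping all three back to the start of the ring when the end is reached.
 */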
1605 #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
1606 do { \
1607 if (unlikely(++(ntc) == (txq)->desc_count)) { \
1608 ntc = 0; \
1609 buf = (txq)->tx_buf; \
1610 desc = &(txq)->flex_tx[0]; \
1611 } else { \
1612 (buf)++; \
1613 (desc)++; \
1614 } \
1615 } while (0)
1616
1617 /**
1618 * idpf_tx_splitq_clean - Reclaim resources from buffer queue
1619 * @tx_q: Tx queue to clean
1620 * @end: queue index until which it should be cleaned
1621 * @napi_budget: Used to determine if we are in netpoll
1622 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1623 * @descs_only: true if queue is using flow-based scheduling and should
1624 * not clean buffers at this time
1625 *
1626 * Cleans the queue descriptor ring. If the queue is using queue-based
1627 * scheduling, the buffers will be cleaned as well. If the queue is using
1628 * flow-based scheduling, only the descriptors are cleaned at this time.
1629 * Separate packet completion events will be reported on the completion queue,
1630 * and the buffers will be cleaned separately. The stats are not updated from
1631 * this function when using flow-based scheduling.
1632 */
static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
				 int napi_budget,
				 struct libeth_sq_napi_stats *cleaned,
				 bool descs_only)
1637 {
1638 union idpf_tx_flex_desc *next_pending_desc = NULL;
1639 union idpf_tx_flex_desc *tx_desc;
1640 u32 ntc = tx_q->next_to_clean;
1641 struct libeth_cq_pp cp = {
1642 .dev = tx_q->dev,
1643 .ss = cleaned,
1644 .napi = napi_budget,
1645 };
1646 struct idpf_tx_buf *tx_buf;
1647
1648 if (descs_only) {
1649 /* Bump ring index to mark as cleaned. */
1650 tx_q->next_to_clean = end;
1651 return;
1652 }
1653
1654 tx_desc = &tx_q->flex_tx[ntc];
1655 next_pending_desc = &tx_q->flex_tx[end];
1656 tx_buf = &tx_q->tx_buf[ntc];
1657
1658 while (tx_desc != next_pending_desc) {
1659 u32 eop_idx;
1660
1661 /* If this entry in the ring was used as a context descriptor,
 * its corresponding entry in the buffer ring is reserved. We
1663 * can skip this descriptor since there is no buffer to clean.
1664 */
1665 if (tx_buf->type <= LIBETH_SQE_CTX)
1666 goto fetch_next_txq_desc;
1667
1668 if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
1669 break;
1670
1671 eop_idx = tx_buf->rs_idx;
1672 libeth_tx_complete(tx_buf, &cp);
1673
1674 /* unmap remaining buffers */
1675 while (ntc != eop_idx) {
1676 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1677 tx_desc, tx_buf);
1678
1679 /* unmap any remaining paged data */
1680 libeth_tx_complete(tx_buf, &cp);
1681 }
1682
1683 fetch_next_txq_desc:
1684 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
1685 }
1686
1687 tx_q->next_to_clean = ntc;
1688 }
1689
1690 /**
1691 * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
1692 * @txq: queue to clean
1693 * @buf_id: packet's starting buffer ID, from completion descriptor
1694 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1695 * @budget: Used to determine if we are in netpoll
1696 *
 * Clean all buffers associated with the packet starting at buf_id. The
 * byte/segment counts for the cleaned packet are added to @cleaned.
1699 */
static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
			       struct libeth_sq_napi_stats *cleaned,
			       int budget)
1703 {
1704 struct idpf_tx_buf *tx_buf = NULL;
1705 struct libeth_cq_pp cp = {
1706 .dev = txq->dev,
1707 .ss = cleaned,
1708 .napi = budget,
1709 };
1710
1711 tx_buf = &txq->tx_buf[buf_id];
1712 if (tx_buf->type == LIBETH_SQE_SKB) {
1713 if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
1714 idpf_tx_read_tstamp(txq, tx_buf->skb);
1715
1716 libeth_tx_complete(tx_buf, &cp);
1717 idpf_post_buf_refill(txq->refillq, buf_id);
1718 }
1719
1720 while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
1721 buf_id = idpf_tx_buf_next(tx_buf);
1722
1723 tx_buf = &txq->tx_buf[buf_id];
1724 libeth_tx_complete(tx_buf, &cp);
1725 idpf_post_buf_refill(txq->refillq, buf_id);
1726 }
1727 }
1728
1729 /**
1730 * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
1731 * whether on the buffer ring or in the hash table
1732 * @txq: Tx ring to clean
1733 * @desc: pointer to completion queue descriptor to extract completion
1734 * information from
1735 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1736 * @budget: Used to determine if we are in netpoll
1737 *
 * The cleaned bytes/packets are added to @cleaned.
1739 */
static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
					 struct idpf_splitq_tx_compl_desc *desc,
					 struct libeth_sq_napi_stats *cleaned,
					 int budget)
1744 {
1745 /* RS completion contains queue head for queue based scheduling or
1746 * completion tag for flow based scheduling.
1747 */
1748 u16 rs_compl_val = le16_to_cpu(desc->q_head_compl_tag.q_head);
1749
1750 if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
1751 idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false);
1752 return;
1753 }
1754
1755 idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget);
1756 }
1757
1758 /**
1759 * idpf_tx_clean_complq - Reclaim resources on completion queue
1760 * @complq: Tx ring to clean
1761 * @budget: Used to determine if we are in netpoll
1762 * @cleaned: returns number of packets cleaned
1763 *
 * Returns true if there's any budget left (i.e. the clean is finished)
1765 */
static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
				 int *cleaned)
1768 {
1769 struct idpf_splitq_tx_compl_desc *tx_desc;
1770 s16 ntc = complq->next_to_clean;
1771 struct idpf_netdev_priv *np;
1772 unsigned int complq_budget;
1773 bool complq_ok = true;
1774 int i;
1775
1776 complq_budget = complq->clean_budget;
1777 tx_desc = &complq->comp[ntc];
1778 ntc -= complq->desc_count;
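/* Keep ntc as a negative offset from the ring end (ntc - desc_count) so
 * that hitting zero cheaply signals a ring wrap.
 */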
1779
1780 do {
1781 struct libeth_sq_napi_stats cleaned_stats = { };
1782 struct idpf_tx_queue *tx_q;
1783 int rel_tx_qid;
1784 u16 hw_head;
1785 u8 ctype; /* completion type */
1786 u16 gen;
1787
1788 /* if the descriptor isn't done, no work yet to do */
1789 gen = le16_get_bits(tx_desc->qid_comptype_gen,
1790 IDPF_TXD_COMPLQ_GEN_M);
1791 if (idpf_queue_has(GEN_CHK, complq) != gen)
1792 break;
1793
1794 /* Find necessary info of TX queue to clean buffers */
1795 rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
1796 IDPF_TXD_COMPLQ_QID_M);
1797 if (rel_tx_qid >= complq->txq_grp->num_txq ||
1798 !complq->txq_grp->txqs[rel_tx_qid]) {
1799 netdev_err(complq->netdev, "TxQ not found\n");
1800 goto fetch_next_desc;
1801 }
1802 tx_q = complq->txq_grp->txqs[rel_tx_qid];
1803
1804 /* Determine completion type */
1805 ctype = le16_get_bits(tx_desc->qid_comptype_gen,
1806 IDPF_TXD_COMPLQ_COMPL_TYPE_M);
1807 switch (ctype) {
1808 case IDPF_TXD_COMPLT_RE:
1809 hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
1810
1811 idpf_tx_splitq_clean(tx_q, hw_head, budget,
1812 &cleaned_stats, true);
1813 break;
1814 case IDPF_TXD_COMPLT_RS:
1815 idpf_tx_handle_rs_completion(tx_q, tx_desc,
1816 &cleaned_stats, budget);
1817 break;
1818 case IDPF_TXD_COMPLT_SW_MARKER:
1819 idpf_tx_handle_sw_marker(tx_q);
1820 break;
1821 default:
1822 netdev_err(tx_q->netdev,
1823 "Unknown TX completion type: %d\n", ctype);
1824 goto fetch_next_desc;
1825 }
1826
1827 u64_stats_update_begin(&tx_q->stats_sync);
1828 u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
1829 u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
1830 tx_q->cleaned_pkts += cleaned_stats.packets;
1831 tx_q->cleaned_bytes += cleaned_stats.bytes;
1832 complq->num_completions++;
1833 u64_stats_update_end(&tx_q->stats_sync);
1834
1835 fetch_next_desc:
1836 tx_desc++;
1837 ntc++;
1838 if (unlikely(!ntc)) {
1839 ntc -= complq->desc_count;
1840 tx_desc = &complq->comp[0];
1841 idpf_queue_change(GEN_CHK, complq);
1842 }
1843
1844 prefetch(tx_desc);
1845
1846 /* update budget accounting */
1847 complq_budget--;
1848 } while (likely(complq_budget));
1849
1850 /* Store the state of the complq to be used later in deciding if a
1851 * TXQ can be started again
1852 */
1853 if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
1854 IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
1855 complq_ok = false;
1856
1857 np = netdev_priv(complq->netdev);
1858 for (i = 0; i < complq->txq_grp->num_txq; ++i) {
1859 struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
1860 struct netdev_queue *nq;
1861 bool dont_wake;
1862
1863 /* We didn't clean anything on this queue, move along */
1864 if (!tx_q->cleaned_bytes)
1865 continue;
1866
1867 *cleaned += tx_q->cleaned_pkts;
1868
1869 /* Update BQL */
1870 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
1871
1872 dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP ||
1873 !netif_carrier_ok(tx_q->netdev);
1874 /* Check if the TXQ needs to and can be restarted */
1875 __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
1876 IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
1877 dont_wake);
1878
1879 /* Reset cleaned stats for the next time this queue is
1880 * cleaned
1881 */
1882 tx_q->cleaned_bytes = 0;
1883 tx_q->cleaned_pkts = 0;
1884 }
1885
1886 ntc += complq->desc_count;
1887 complq->next_to_clean = ntc;
1888
1889 return !!complq_budget;
1890 }
1891
1892 /**
1893 * idpf_tx_splitq_build_ctb - populate command tag and size for queue
1894 * based scheduling descriptors
1895 * @desc: descriptor to populate
1896 * @params: pointer to tx params struct
1897 * @td_cmd: command to be filled in desc
1898 * @size: size of buffer
1899 */
void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size)
1903 {
1904 desc->q.qw1.cmd_dtype =
1905 le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
1906 desc->q.qw1.cmd_dtype |=
1907 le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
1908 desc->q.qw1.buf_size = cpu_to_le16(size);
1909 desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
1910 }
1911
1912 /**
1913 * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
1914 * scheduling descriptors
1915 * @desc: descriptor to populate
1916 * @params: pointer to tx params struct
1917 * @td_cmd: command to be filled in desc
1918 * @size: size of buffer
1919 */
1920 void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
1921 struct idpf_tx_splitq_params *params,
1922 u16 td_cmd, u16 size)
1923 {
1924 *(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd);
1925 desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
1926 desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
1927 }
1928
1929 /**
1930 * idpf_txq_has_room - check if enough Tx splitq resources are available
1931 * @tx_q: the queue to be checked
1932 * @descs_needed: number of descriptors required for this packet
1933 * @bufs_needed: number of Tx buffers required for this packet
1934 *
1935 * Return: 0 if no room available, 1 otherwise
1936 */
1937 static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
1938 u32 bufs_needed)
1939 {
1940 if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
1941 IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
1942 IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
1943 idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
1944 return 0;
1945 return 1;
1946 }
1947
1948 /**
1949 * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
1950 * @tx_q: the queue to be checked
1951 * @descs_needed: number of descriptors required for this packet
1952 * @bufs_needed: number of buffers needed for this packet
1953 *
1954 * Return: 0 if stop is not needed
1955 */
1956 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
1957 u32 descs_needed,
1958 u32 bufs_needed)
1959 {
1960 /* Since we have multiple resources to check for splitq, our
1961 * start,stop_thrs becomes a boolean check instead of a count
1962 * threshold.
1963 */
1964 if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
1965 idpf_txq_has_room(tx_q, descs_needed,
1966 bufs_needed),
1967 1, 1))
1968 return 0;
1969
1970 u64_stats_update_begin(&tx_q->stats_sync);
1971 u64_stats_inc(&tx_q->q_stats.q_busy);
1972 u64_stats_update_end(&tx_q->stats_sync);
1973
1974 return -EBUSY;
1975 }
1976
1977 /**
1978 * idpf_tx_buf_hw_update - Store the new tail value
1979 * @tx_q: queue to bump
1980 * @val: new tail index
1981 * @xmit_more: more skb's pending
1982 *
1983 * The naming here is special in that 'hw' signals that this function is about
1984 * to do a register write to update our queue status. We know this can only
1985 * mean tail here as HW should be owning head for TX.
1986 */
1987 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
1988 bool xmit_more)
1989 {
1990 struct netdev_queue *nq;
1991
1992 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
1993 tx_q->next_to_use = val;
1994
1995 /* Force memory writes to complete before letting h/w
1996 * know there are new descriptors to fetch. (Only
1997 * applicable for weak-ordered memory model archs,
1998 * such as IA-64).
1999 */
2000 wmb();
2001
2002 /* notify HW of packet */
2003 if (netif_xmit_stopped(nq) || !xmit_more)
2004 writel(val, tx_q->tail);
2005 }
2006
2007 /**
2008 * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
2009 * @txq: queue to send buffer on
2010 * @skb: send buffer
2011 * @bufs_needed: (output) number of buffers needed for this skb.
2012 *
2013 * Return: number of data descriptors needed for this skb; the number of
2013 * buffers is returned via @bufs_needed.
2014 */
2015 unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
2016 struct sk_buff *skb,
2017 u32 *bufs_needed)
2018 {
2019 const struct skb_shared_info *shinfo;
2020 unsigned int count = 0, i;
2021
2022 count += !!skb_headlen(skb);
2023
2024 if (!skb_is_nonlinear(skb))
2025 return count;
2026
2027 shinfo = skb_shinfo(skb);
2028 *bufs_needed += shinfo->nr_frags;
2029 for (i = 0; i < shinfo->nr_frags; i++) {
2030 unsigned int size;
2031
2032 size = skb_frag_size(&shinfo->frags[i]);
2033
2034 /* We only need to use the idpf_size_to_txd_count check if the
2035 * fragment is going to span multiple descriptors,
2036 * i.e. size >= 16K.
2037 */
2038 if (size >= SZ_16K)
2039 count += idpf_size_to_txd_count(size);
2040 else
2041 count++;
2042 }
2043
2044 if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
2045 if (__skb_linearize(skb))
2046 return 0;
2047
2048 count = idpf_size_to_txd_count(skb->len);
2049 u64_stats_update_begin(&txq->stats_sync);
2050 u64_stats_inc(&txq->q_stats.linearize);
2051 u64_stats_update_end(&txq->stats_sync);
2052 }
2053
2054 return count;
2055 }
2056
2057 /**
2058 * idpf_tx_splitq_bump_ntu - advance and wrap the next_to_use index
2059 * @txq: the tx ring to wrap
2060 * @ntu: ring index to bump
2061 */
2062 static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
2063 {
2064 ntu++;
2065
2066 if (ntu == txq->desc_count)
2067 ntu = 0;
2068
2069 return ntu;
2070 }
2071
2072 /**
2073 * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
2074 * @refillq: refill queue to get buffer ID from
2075 * @buf_id: return buffer ID
2076 *
2077 * Return: true if a buffer ID was found, false if not
2078 */
2079 static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
2080 u32 *buf_id)
2081 {
2082 u32 ntc = refillq->next_to_clean;
2083 u32 refill_desc;
2084
2085 refill_desc = refillq->ring[ntc];
2086
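/* The GEN bit stored in each refill descriptor flips every time the producer
 * (the completion cleaning side) wraps the refill ring; if it does not match
 * the queue's RFL_GEN_CHK flag, this slot has not been refilled yet and no
 * free buffer ID is available.
 */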
2087 if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
2088 !!(refill_desc & IDPF_RFL_BI_GEN_M)))
2089 return false;
2090
2091 *buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
2092
2093 if (unlikely(++ntc == refillq->desc_count)) {
2094 idpf_queue_change(RFL_GEN_CHK, refillq);
2095 ntc = 0;
2096 }
2097
2098 refillq->next_to_clean = ntc;
2099
2100 return true;
2101 }
2102
2103 /**
2104 * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
2105 * @txq: Tx queue to unwind
2106 * @params: pointer to splitq params struct
2107 * @first: starting buffer for packet to unmap
2108 */
2109 static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
2110 struct idpf_tx_splitq_params *params,
2111 struct idpf_tx_buf *first)
2112 {
2113 struct idpf_sw_queue *refillq = txq->refillq;
2114 struct libeth_sq_napi_stats ss = { };
2115 struct idpf_tx_buf *tx_buf = first;
2116 struct libeth_cq_pp cp = {
2117 .dev = txq->dev,
2118 .ss = &ss,
2119 };
2120
2121 u64_stats_update_begin(&txq->stats_sync);
2122 u64_stats_inc(&txq->q_stats.dma_map_errs);
2123 u64_stats_update_end(&txq->stats_sync);
2124
2125 libeth_tx_complete(tx_buf, &cp);
2126 while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
2127 tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)];
2128 libeth_tx_complete(tx_buf, &cp);
2129 }
2130
2131 /* Update tail in case netdev_xmit_more was previously true. */
2132 idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
2133
2134 if (!refillq)
2135 return;
2136
2137 /* Restore refillq state to avoid leaking tags. */
2138 if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq))
2139 idpf_queue_change(RFL_GEN_CHK, refillq);
2140 refillq->next_to_clean = params->prev_refill_ntc;
2141 }
2142
2143 /**
2144 * idpf_tx_splitq_map - Build the Tx flex descriptor
2145 * @tx_q: queue to send buffer on
2146 * @params: pointer to splitq params struct
2147 * @first: first buffer info buffer to use
2148 *
2149 * This function loops over the skb data pointed to by *first
2150 * and gets a physical address for each memory location and programs
2151 * it and the length into the transmit flex descriptor.
2152 */
2153 static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
2154 struct idpf_tx_splitq_params *params,
2155 struct idpf_tx_buf *first)
2156 {
2157 union idpf_tx_flex_desc *tx_desc;
2158 unsigned int data_len, size;
2159 struct idpf_tx_buf *tx_buf;
2160 u16 i = tx_q->next_to_use;
2161 struct netdev_queue *nq;
2162 struct sk_buff *skb;
2163 skb_frag_t *frag;
2164 u32 next_buf_id;
2165 u16 td_cmd = 0;
2166 dma_addr_t dma;
2167
2168 skb = first->skb;
2169
2170 td_cmd = params->offload.td_cmd;
2171
2172 data_len = skb->data_len;
2173 size = skb_headlen(skb);
2174
2175 tx_desc = &tx_q->flex_tx[i];
2176
2177 dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2178
2179 tx_buf = first;
2180 first->nr_frags = 0;
2181
2182 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2183 unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2184
2185 if (unlikely(dma_mapping_error(tx_q->dev, dma))) {
2186 idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
2187 return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
2188 first);
2189 }
2190
2191 first->nr_frags++;
2192 tx_buf->type = LIBETH_SQE_FRAG;
2193
2194 /* record length, and DMA address */
2195 dma_unmap_len_set(tx_buf, len, size);
2196 dma_unmap_addr_set(tx_buf, dma, dma);
2197
2198 /* buf_addr is in same location for both desc types */
2199 tx_desc->q.buf_addr = cpu_to_le64(dma);
2200
2201 /* The stack can send us fragments that are too large for a
2202 * single descriptor i.e. frag size > 16K-1. We will need to
2203 * split the fragment across multiple descriptors in this case.
2204 * To adhere to HW alignment restrictions, the fragment needs
2205 * to be split such that the first chunk ends on a 4K boundary
2206 * and all subsequent chunks start on a 4K boundary. We still
2207 * want to send as much data as possible though, so our
2208 * intermediate descriptor chunk size will be 12K.
2209 *
2210 * For example, consider a 32K fragment mapped to DMA addr 2600.
2211 * ------------------------------------------------------------
2212 * | frag_size = 32K |
2213 * ------------------------------------------------------------
2214 * |2600 |16384 |28672
2215 *
2216 * 3 descriptors will be used for this fragment. The HW expects
2217 * the descriptors to contain the following:
2218 * ------------------------------------------------------------
2219 * | size = 13784 | size = 12K | size = 6696 |
2220 * | dma = 2600 | dma = 16384 | dma = 28672 |
2221 * ------------------------------------------------------------
2222 *
2223 * We need to first adjust the max_data for the first chunk so
2224 * that it ends on a 4K boundary. By negating the value of the
2225 * DMA address and taking only the low order bits, we're
2226 * effectively calculating
2227 * 4K - (DMA addr lower order bits) =
2228 * bytes to next boundary.
2229 *
2230 * Add that to our base aligned max_data (12K) and we have
2231 * our first chunk size. In the example above,
2232 * 13784 = 12K + (4096-2600)
2233 *
2234 * After guaranteeing the first chunk ends on a 4K boundary, we
2235 * will give the intermediate descriptors 12K chunks and
2236 * whatever is left to the final descriptor. This ensures that
2237 * all descriptors used for the remaining chunks of the
2238 * fragment start on a 4K boundary and we use as few
2239 * descriptors as possible.
2240 */
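/* Working the example above: -2600 & (4096 - 1) = 1496, so the first chunk is
 * max_data = 12288 + 1496 = 13784 bytes.
 */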
2241 max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
2242 while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
2243 idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
2244 max_data);
2245
2246 if (unlikely(++i == tx_q->desc_count)) {
2247 tx_desc = &tx_q->flex_tx[0];
2248 i = 0;
2249 } else {
2250 tx_desc++;
2251 }
2252
2253 /* Adjust the DMA offset and the remaining size of the
2254 * fragment. On the first iteration of this loop,
2255 * max_data will be >= 12K and <= 16K-1. On any
2256 * subsequent iteration of this loop, max_data will
2257 * always be 12K.
2258 */
2259 dma += max_data;
2260 size -= max_data;
2261
2262 /* Reset max_data since remaining chunks will be 12K
2263 * at most
2264 */
2265 max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2266
2267 /* buf_addr is in same location for both desc types */
2268 tx_desc->q.buf_addr = cpu_to_le64(dma);
2269 }
2270
2271 if (!data_len)
2272 break;
2273
2274 idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2275
2276 if (unlikely(++i == tx_q->desc_count)) {
2277 tx_desc = &tx_q->flex_tx[0];
2278 i = 0;
2279 } else {
2280 tx_desc++;
2281 }
2282
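/* In flow scheduling mode, buffer IDs for the remaining frags are popped from
 * the refill queue; in queue-based scheduling the descriptor ring index is
 * used as the buffer ID directly.
 */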
2283 if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
2284 if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
2285 &next_buf_id))) {
2286 idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
2287 return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
2288 first);
2289 }
2290 } else {
2291 next_buf_id = i;
2292 }
2293 idpf_tx_buf_next(tx_buf) = next_buf_id;
2294 tx_buf = &tx_q->tx_buf[next_buf_id];
2295
2296 size = skb_frag_size(frag);
2297 data_len -= size;
2298
2299 dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2300 DMA_TO_DEVICE);
2301 }
2302
2303 /* record SW timestamp if HW timestamp is not available */
2304 skb_tx_timestamp(skb);
2305
2306 first->type = LIBETH_SQE_SKB;
2307
2308 /* write last descriptor with RS and EOP bits */
2309 first->rs_idx = i;
2310 idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
2311 td_cmd |= params->eop_cmd;
2312 idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2313 i = idpf_tx_splitq_bump_ntu(tx_q, i);
2314
2315 tx_q->txq_grp->num_completions_pending++;
2316
2317 /* record bytecount for BQL */
2318 nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2319 netdev_tx_sent_queue(nq, first->bytes);
2320
2321 idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
2322 }
2323
2324 /**
2325 * idpf_tso - computes mss and TSO length to prepare for TSO
2326 * @skb: pointer to skb
2327 * @off: pointer to struct that holds offload parameters
2328 *
2329 * Returns error (negative) if TSO was requested but cannot be applied to the
2330 * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
2331 */
2332 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
2333 {
2334 const struct skb_shared_info *shinfo;
2335 union {
2336 struct iphdr *v4;
2337 struct ipv6hdr *v6;
2338 unsigned char *hdr;
2339 } ip;
2340 union {
2341 struct tcphdr *tcp;
2342 struct udphdr *udp;
2343 unsigned char *hdr;
2344 } l4;
2345 u32 paylen, l4_start;
2346 int err;
2347
2348 if (!skb_is_gso(skb))
2349 return 0;
2350
2351 err = skb_cow_head(skb, 0);
2352 if (err < 0)
2353 return err;
2354
2355 shinfo = skb_shinfo(skb);
2356
2357 ip.hdr = skb_network_header(skb);
2358 l4.hdr = skb_transport_header(skb);
2359
2360 /* initialize outer IP header fields */
2361 if (ip.v4->version == 4) {
2362 ip.v4->tot_len = 0;
2363 ip.v4->check = 0;
2364 } else if (ip.v6->version == 6) {
2365 ip.v6->payload_len = 0;
2366 }
2367
2368 l4_start = skb_transport_offset(skb);
2369
2370 /* remove payload length from checksum */
2371 paylen = skb->len - l4_start;
2372
2373 switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
2374 case SKB_GSO_TCPV4:
2375 case SKB_GSO_TCPV6:
2376 csum_replace_by_diff(&l4.tcp->check,
2377 (__force __wsum)htonl(paylen));
2378 off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
2379 break;
2380 case SKB_GSO_UDP_L4:
2381 csum_replace_by_diff(&l4.udp->check,
2382 (__force __wsum)htonl(paylen));
2383 /* compute length of segmentation header */
2384 off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
2385 l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
2386 break;
2387 default:
2388 return -EINVAL;
2389 }
2390
2391 off->tso_len = skb->len - off->tso_hdr_len;
2392 off->mss = shinfo->gso_size;
2393 off->tso_segs = shinfo->gso_segs;
2394
2395 off->tx_flags |= IDPF_TX_FLAGS_TSO;
2396
2397 return 1;
2398 }
2399
2400 /**
2401 * __idpf_chk_linearize - Check skb is not using too many buffers
2402 * @skb: send buffer
2403 * @max_bufs: maximum number of buffers
2404 *
2405 * For TSO we need to count the TSO header and segment payload separately. As
2406 * such we need to check cases where we have max_bufs-1 fragments or more as we
2407 * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
2408 * for the segment payload in the first descriptor, and another max_bufs-1 for
2409 * the fragments.
2410 */
2411 static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
2412 {
2413 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2414 const skb_frag_t *frag, *stale;
2415 int nr_frags, sum;
2416
2417 /* no need to check if number of frags is less than max_bufs - 1 */
2418 nr_frags = shinfo->nr_frags;
2419 if (nr_frags < (max_bufs - 1))
2420 return false;
2421
2422 /* We need to walk through the list and validate that each group
2423 * of max_bufs-2 fragments totals at least gso_size.
2424 */
2425 nr_frags -= max_bufs - 2;
2426 frag = &shinfo->frags[0];
2427
2428 /* Initialize sum to 1 - gso_size, i.e. minus (gso_size - 1). We use
2429 * this as the worst case scenario in which the frag ahead of us only
2430 * provides one byte which is why we are limited to max_bufs-2
2431 * descriptors for a single transmit as the header and previous
2432 * fragment are already consuming 2 descriptors.
2433 */
2434 sum = 1 - shinfo->gso_size;
2435
2436 /* Add size of frags 0 through 4 to create our initial sum */
2437 sum += skb_frag_size(frag++);
2438 sum += skb_frag_size(frag++);
2439 sum += skb_frag_size(frag++);
2440 sum += skb_frag_size(frag++);
2441 sum += skb_frag_size(frag++);
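/* Together with the fragment added at the top of each loop iteration below,
 * the five additions above form a sliding window of consecutive fragments.
 * If any window (on top of the header and the preceding fragment, which
 * already consume two descriptors) falls short of gso_size, sum goes negative
 * and the caller must linearize the skb.
 */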
2442
2443 /* Walk through fragments adding latest fragment, testing it, and
2444 * then removing stale fragments from the sum.
2445 */
2446 for (stale = &shinfo->frags[0];; stale++) {
2447 int stale_size = skb_frag_size(stale);
2448
2449 sum += skb_frag_size(frag++);
2450
2451 /* The stale fragment may present us with a smaller
2452 * descriptor than the actual fragment size. To account
2453 * for that we need to remove all the data on the front and
2454 * figure out what the remainder would be in the last
2455 * descriptor associated with the fragment.
2456 */
2457 if (stale_size > IDPF_TX_MAX_DESC_DATA) {
2458 int align_pad = -(skb_frag_off(stale)) &
2459 (IDPF_TX_MAX_READ_REQ_SIZE - 1);
2460
2461 sum -= align_pad;
2462 stale_size -= align_pad;
2463
2464 do {
2465 sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2466 stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2467 } while (stale_size > IDPF_TX_MAX_DESC_DATA);
2468 }
2469
2470 /* if sum is negative we failed to make sufficient progress */
2471 if (sum < 0)
2472 return true;
2473
2474 if (!nr_frags--)
2475 break;
2476
2477 sum -= stale_size;
2478 }
2479
2480 return false;
2481 }
2482
2483 /**
2484 * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
2485 * @skb: send buffer
2486 * @max_bufs: maximum scatter gather buffers for single packet
2487 * @count: number of buffers this packet needs
2488 *
2489 * Make sure we don't exceed maximum scatter gather buffers for a single
2490 * packet. We have to do some special checking around the boundary (max_bufs-1)
2491 * if TSO is on since we need to count the TSO header and payload separately.
2492 * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
2493 * header, 1 for segment payload, and then 7 for the fragments.
2494 */
2495 static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
2496 unsigned int count)
2497 {
2498 if (likely(count < max_bufs))
2499 return false;
2500 if (skb_is_gso(skb))
2501 return __idpf_chk_linearize(skb, max_bufs);
2502
2503 return count > max_bufs;
2504 }
2505
2506 /**
2507 * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2508 * @txq: queue to put context descriptor on
2509 *
2510 * Since the Tx buffer ring mimics the descriptor ring, update the Tx buffer
2511 * ring entry to reflect that this index is a context descriptor
2512 */
2513 static union idpf_flex_tx_ctx_desc *
2514 idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
2515 {
2516 union idpf_flex_tx_ctx_desc *desc;
2517 int i = txq->next_to_use;
2518
2519 /* grab the next descriptor */
2520 desc = &txq->flex_ctx[i];
2521 txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
2522
2523 return desc;
2524 }
2525
2526 /**
2527 * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2528 * @tx_q: queue to send buffer on
2529 * @skb: pointer to skb
2530 */
2531 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
2532 {
2533 u64_stats_update_begin(&tx_q->stats_sync);
2534 u64_stats_inc(&tx_q->q_stats.skb_drops);
2535 u64_stats_update_end(&tx_q->stats_sync);
2536
2537 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2538
2539 dev_kfree_skb(skb);
2540
2541 return NETDEV_TX_OK;
2542 }
2543
2544 #if (IS_ENABLED(CONFIG_PTP_1588_CLOCK))
2545 /**
2546 * idpf_tx_tstamp - set up context descriptor for hardware timestamp
2547 * @tx_q: queue to send buffer on
2548 * @skb: pointer to the SKB we're sending
2549 * @off: pointer to the offload struct
2550 *
2551 * Return: Positive index number on success, negative otherwise.
2552 */
2553 static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
2554 struct idpf_tx_offload_params *off)
2555 {
2556 int err, idx;
2557
2558 /* only timestamp the outbound packet if the user has requested it */
2559 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2560 return -1;
2561
2562 if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
2563 return -1;
2564
2565 /* Tx timestamps cannot be sampled when doing TSO */
2566 if (off->tx_flags & IDPF_TX_FLAGS_TSO)
2567 return -1;
2568
2569 /* Grab an open timestamp slot */
2570 err = idpf_ptp_request_ts(tx_q, skb, &idx);
2571 if (err) {
2572 u64_stats_update_begin(&tx_q->stats_sync);
2573 u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
2574 u64_stats_update_end(&tx_q->stats_sync);
2575
2576 return -1;
2577 }
2578
2579 off->tx_flags |= IDPF_TX_FLAGS_TSYN;
2580
2581 return idx;
2582 }
2583
2584 /**
2585 * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate
2586 * PHY Tx timestamp
2587 * @ctx_desc: Context descriptor
2588 * @idx: Index of the Tx timestamp latch
2589 */
2590 static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
2591 u32 idx)
2592 {
2593 ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX,
2594 IDPF_TX_CTX_DTYPE_M) |
2595 le64_encode_bits(IDPF_TX_CTX_DESC_TSYN,
2596 IDPF_TX_CTX_CMD_M) |
2597 le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
2598 }
2599 #else /* CONFIG_PTP_1588_CLOCK */
2600 static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
2601 struct idpf_tx_offload_params *off)
2602 {
2603 return -1;
2604 }
2605
2606 static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
2607 u32 idx)
2608 { }
2609 #endif /* CONFIG_PTP_1588_CLOCK */
2610
2611 /**
2612 * idpf_tx_splitq_need_re - check whether RE bit needs to be set
2613 * @tx_q: pointer to Tx queue
2614 *
2615 * Return: true if RE bit needs to be set, false otherwise
2616 */
2617 static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
2618 {
2619 int gap = tx_q->next_to_use - tx_q->last_re;
2620
2621 gap += (gap < 0) ? tx_q->desc_count : 0;
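/* e.g. a 512-entry ring with next_to_use = 10 and last_re = 500 gives
 * gap = -490 + 512 = 22 descriptors since the last RE.
 */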
2622
2623 return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
2624 }
2625
2626 /**
2627 * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
2628 * @skb: send buffer
2629 * @tx_q: queue to send buffer on
2630 *
2631 * Returns NETDEV_TX_OK if sent, else an error code
2632 */
2633 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
2634 struct idpf_tx_queue *tx_q)
2635 {
2636 struct idpf_tx_splitq_params tx_params = {
2637 .prev_ntu = tx_q->next_to_use,
2638 };
2639 union idpf_flex_tx_ctx_desc *ctx_desc;
2640 struct idpf_tx_buf *first;
2641 u32 count, buf_count = 1;
2642 int tso, idx;
2643 u32 buf_id;
2644
2645 count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
2646 if (unlikely(!count))
2647 return idpf_tx_drop_skb(tx_q, skb);
2648
2649 tso = idpf_tso(skb, &tx_params.offload);
2650 if (unlikely(tso < 0))
2651 return idpf_tx_drop_skb(tx_q, skb);
2652
2653 /* Check for splitq specific TX resources */
2654 count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
2655 if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
2656 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2657
2658 return NETDEV_TX_BUSY;
2659 }
2660
2661 if (tso) {
2662 /* If tso is needed, set up context desc */
2663 ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
2664
2665 ctx_desc->tso.qw1.cmd_dtype =
2666 cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
2667 IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
2668 ctx_desc->tso.qw0.flex_tlen =
2669 cpu_to_le32(tx_params.offload.tso_len &
2670 IDPF_TXD_FLEX_CTX_TLEN_M);
2671 ctx_desc->tso.qw0.mss_rt =
2672 cpu_to_le16(tx_params.offload.mss &
2673 IDPF_TXD_FLEX_CTX_MSS_RT_M);
2674 ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
2675
2676 u64_stats_update_begin(&tx_q->stats_sync);
2677 u64_stats_inc(&tx_q->q_stats.lso_pkts);
2678 u64_stats_update_end(&tx_q->stats_sync);
2679 }
2680
2681 idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload);
2682 if (idx != -1) {
2683 ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
2684 idpf_tx_set_tstamp_desc(ctx_desc, idx);
2685 }
2686
2687 if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
2688 struct idpf_sw_queue *refillq = tx_q->refillq;
2689
2690 /* Save refillq state in case of a packet rollback. Otherwise,
2691 * the tags will be leaked since they will be popped from the
2692 * refillq but never reposted during cleaning.
2693 */
2694 tx_params.prev_refill_gen =
2695 idpf_queue_has(RFL_GEN_CHK, refillq);
2696 tx_params.prev_refill_ntc = refillq->next_to_clean;
2697
2698 if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
2699 &buf_id))) {
2700 if (tx_params.prev_refill_gen !=
2701 idpf_queue_has(RFL_GEN_CHK, refillq))
2702 idpf_queue_change(RFL_GEN_CHK, refillq);
2703 refillq->next_to_clean = tx_params.prev_refill_ntc;
2704
2705 tx_q->next_to_use = tx_params.prev_ntu;
2706 return idpf_tx_drop_skb(tx_q, skb);
2707 }
2708 tx_params.compl_tag = buf_id;
2709
2710 tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
2711 tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
2712 /* Set the RE bit to periodically "clean" the descriptor ring.
2713 * MIN_GAP is set to MIN_RING size to ensure it will be set at
2714 * least once each time around the ring.
2715 */
2716 if (idpf_tx_splitq_need_re(tx_q)) {
2717 tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
2718 tx_q->txq_grp->num_completions_pending++;
2719 tx_q->last_re = tx_q->next_to_use;
2720 }
2721
2722 if (skb->ip_summed == CHECKSUM_PARTIAL)
2723 tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
2724
2725 } else {
2726 buf_id = tx_q->next_to_use;
2727
2728 tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
2729 tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
2730
2731 if (skb->ip_summed == CHECKSUM_PARTIAL)
2732 tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
2733 }
2734
2735 first = &tx_q->tx_buf[buf_id];
2736 first->skb = skb;
2737
2738 if (tso) {
2739 first->packets = tx_params.offload.tso_segs;
2740 first->bytes = skb->len +
2741 ((first->packets - 1) * tx_params.offload.tso_hdr_len);
2742 } else {
2743 first->packets = 1;
2744 first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
2745 }
2746
2747 idpf_tx_splitq_map(tx_q, &tx_params, first);
2748
2749 return NETDEV_TX_OK;
2750 }
2751
2752 /**
2753 * idpf_tx_start - Selects the right Tx queue to send buffer
2754 * @skb: send buffer
2755 * @netdev: network interface device structure
2756 *
2757 * Returns NETDEV_TX_OK if sent, else an error code
2758 */
2759 netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
2760 {
2761 struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2762 struct idpf_tx_queue *tx_q;
2763
2764 if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
2765 dev_kfree_skb_any(skb);
2766
2767 return NETDEV_TX_OK;
2768 }
2769
2770 tx_q = vport->txqs[skb_get_queue_mapping(skb)];
2771
2772 /* hardware can't handle really short frames, hardware padding works
2773 * beyond this point
2774 */
2775 if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
2776 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2777
2778 return NETDEV_TX_OK;
2779 }
2780
2781 if (idpf_is_queue_model_split(vport->txq_model))
2782 return idpf_tx_splitq_frame(skb, tx_q);
2783 else
2784 return idpf_tx_singleq_frame(skb, tx_q);
2785 }
2786
2787 /**
2788 * idpf_rx_hash - set the hash value in the skb
2789 * @rxq: Rx descriptor ring packet is being transacted on
2790 * @skb: pointer to current skb being populated
2791 * @rx_desc: Receive descriptor
2792 * @decoded: Decoded Rx packet type related fields
2793 */
2794 static void
2795 idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
2796 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2797 struct libeth_rx_pt decoded)
2798 {
2799 u32 hash;
2800
2801 if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
2802 return;
2803
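/* The 32-bit hash is scattered across three descriptor fields: hash1 supplies
 * bits 0-15, hash2 bits 16-23 and hash3 bits 24-31.
 */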
2804 hash = le16_to_cpu(rx_desc->hash1) |
2805 (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
2806 (rx_desc->hash3 << 24);
2807
2808 libeth_rx_pt_set_hash(skb, hash, decoded);
2809 }
2810
2811 /**
2812 * idpf_rx_csum - Indicate in skb if checksum is good
2813 * @rxq: Rx descriptor ring packet is being transacted on
2814 * @skb: pointer to current skb being populated
2815 * @csum_bits: checksum fields extracted from the descriptor
2816 * @decoded: Decoded Rx packet type related fields
2817 *
2818 * skb->protocol must be set before this function is called
2819 */
2820 static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
2821 struct libeth_rx_csum csum_bits,
2822 struct libeth_rx_pt decoded)
2823 {
2824 bool ipv4, ipv6;
2825
2826 /* check if Rx checksum is enabled */
2827 if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
2828 return;
2829
2830 /* check if HW has decoded the packet and checksum */
2831 if (unlikely(!csum_bits.l3l4p))
2832 return;
2833
2834 ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
2835 ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
2836
2837 if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
2838 goto checksum_fail;
2839
2840 if (unlikely(ipv6 && csum_bits.ipv6exadd))
2841 return;
2842
2843 /* check for L4 errors and handle packets that were not able to be
2844 * checksummed
2845 */
2846 if (unlikely(csum_bits.l4e))
2847 goto checksum_fail;
2848
2849 if (!csum_bits.raw_csum_valid ||
2850 decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
2851 skb->ip_summed = CHECKSUM_UNNECESSARY;
2852 return;
2853 }
2854
2855 skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
2856 skb->ip_summed = CHECKSUM_COMPLETE;
2857
2858 return;
2859
2860 checksum_fail:
2861 u64_stats_update_begin(&rxq->stats_sync);
2862 u64_stats_inc(&rxq->q_stats.hw_csum_err);
2863 u64_stats_update_end(&rxq->stats_sync);
2864 }
2865
2866 /**
2867 * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
2868 * @rx_desc: receive descriptor
2869 *
2870 * Return: parsed checksum status.
2871 */
2872 static struct libeth_rx_csum
2873 idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
2874 {
2875 struct libeth_rx_csum csum = { };
2876 u8 qword0, qword1;
2877
2878 qword0 = rx_desc->status_err0_qw0;
2879 qword1 = rx_desc->status_err0_qw1;
2880
2881 csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
2882 qword1);
2883 csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
2884 qword1);
2885 csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
2886 qword1);
2887 csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
2888 qword1);
2889 csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
2890 qword0);
2891 csum.raw_csum_valid =
2892 !le16_get_bits(rx_desc->ptype_err_fflags0,
2893 VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
2894 csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
2895
2896 return csum;
2897 }
2898
2899 /**
2900 * idpf_rx_rsc - Set the RSC fields in the skb
2901 * @rxq: Rx descriptor ring packet is being transacted on
2902 * @skb: pointer to current skb being populated
2903 * @rx_desc: Receive descriptor
2904 * @decoded: Decoded Rx packet type related fields
2905 *
2906 * Return 0 on success and error code on failure
2907 *
2908 * Populate the skb fields with the total number of RSC segments, RSC payload
2909 * length and packet type.
2910 */
2911 static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
2912 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2913 struct libeth_rx_pt decoded)
2914 {
2915 u16 rsc_segments, rsc_seg_len;
2916 bool ipv4, ipv6;
2917 int len;
2918
2919 if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
2920 LIBETH_RX_PT_OUTER_L2))
2921 return -EINVAL;
2922
2923 rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
2924 if (unlikely(!rsc_seg_len))
2925 return -EINVAL;
2926
2927 ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
2928 ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
2929
2930 if (unlikely(!(ipv4 ^ ipv6)))
2931 return -EINVAL;
2932
2933 rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
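/* e.g. roughly 9000 bytes of paged RSC data with 1448-byte segments gives
 * DIV_ROUND_UP(9000, 1448) = 7 coalesced segments for GRO.
 */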
2934
2935 NAPI_GRO_CB(skb)->count = rsc_segments;
2936 skb_shinfo(skb)->gso_size = rsc_seg_len;
2937
2938 skb_reset_network_header(skb);
2939
2940 if (ipv4) {
2941 struct iphdr *ipv4h = ip_hdr(skb);
2942
2943 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2944
2945 /* Reset and set transport header offset in skb */
2946 skb_set_transport_header(skb, sizeof(struct iphdr));
2947 len = skb->len - skb_transport_offset(skb);
2948
2949 /* Compute the TCP pseudo header checksum */
2950 tcp_hdr(skb)->check =
2951 ~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
2952 } else {
2953 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2954
2955 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2956 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
2957 len = skb->len - skb_transport_offset(skb);
2958 tcp_hdr(skb)->check =
2959 ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
2960 }
2961
2962 tcp_gro_complete(skb);
2963
2964 u64_stats_update_begin(&rxq->stats_sync);
2965 u64_stats_inc(&rxq->q_stats.rsc_pkts);
2966 u64_stats_update_end(&rxq->stats_sync);
2967
2968 return 0;
2969 }
2970
2971 /**
2972 * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack
2973 * @rxq: pointer to the rx queue that receives the timestamp
2974 * @rx_desc: pointer to rx descriptor containing timestamp
2975 * @skb: skb to put timestamp in
2976 */
2977 static void
2978 idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
2979 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2980 struct sk_buff *skb)
2981 {
2982 u64 cached_time, ts_ns;
2983 u32 ts_high;
2984
2985 if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
2986 return;
2987
2988 cached_time = READ_ONCE(rxq->cached_phc_time);
2989
2990 ts_high = le32_to_cpu(rx_desc->ts_high);
2991 ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
2992
2993 *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
2994 .hwtstamp = ns_to_ktime(ts_ns),
2995 };
2996 }
2997
2998 /**
2999 * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
3000 * @rxq: Rx descriptor ring packet is being transacted on
3001 * @skb: pointer to current skb being populated
3002 * @rx_desc: Receive descriptor
3003 *
3004 * This function checks the ring, descriptor, and packet information in
3005 * order to populate the hash, checksum, protocol, and
3006 * other fields within the skb.
3007 */
3008 static int
3009 idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3010 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3011 {
3012 struct libeth_rx_csum csum_bits;
3013 struct libeth_rx_pt decoded;
3014 u16 rx_ptype;
3015
3016 rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
3017 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
3018 decoded = rxq->rx_ptype_lkup[rx_ptype];
3019
3020 /* process RSS/hash */
3021 idpf_rx_hash(rxq, skb, rx_desc, decoded);
3022
3023 if (idpf_queue_has(PTP, rxq))
3024 idpf_rx_hwtstamp(rxq, rx_desc, skb);
3025
3026 skb->protocol = eth_type_trans(skb, rxq->netdev);
3027 skb_record_rx_queue(skb, rxq->idx);
3028
3029 if (le16_get_bits(rx_desc->hdrlen_flags,
3030 VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
3031 return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
3032
3033 csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
3034 idpf_rx_csum(rxq, skb, csum_bits, decoded);
3035
3036 return 0;
3037 }
3038
3039 /**
3040 * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
3041 * @rx_buf: buffer containing page to add
3042 * @skb: sk_buff to place the data into
3043 * @size: packet length from rx_desc
3044 *
3045 * This function will add the data contained in rx_buf->page to the skb.
3046 * It will just attach the page as a frag to the skb.
3047 * The function will then update the page offset.
3048 */
3049 void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
3050 unsigned int size)
3051 {
3052 u32 hr = netmem_get_pp(rx_buf->netmem)->p.offset;
3053
3054 skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_buf->netmem,
3055 rx_buf->offset + hr, size, rx_buf->truesize);
3056 }
3057
3058 /**
3059 * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
3060 * @hdr: Rx buffer for the headers
3061 * @buf: Rx buffer for the payload
3062 * @data_len: number of bytes received to the payload buffer
3063 *
3064 * When a header buffer overflow occurs or the HW was unable to parse the
3065 * packet type to perform header split, the whole frame gets placed to the
3066 * payload buffer. We can't build a valid skb around a payload buffer when
3067 * the header split is active since it doesn't reserve any head- or tailroom.
3068 * In that case, copy either the whole frame when it's short or just the
3069 * Ethernet header to the header buffer to be able to build an skb and adjust
3070 * the data offset in the payload buffer, IOW emulate the header split.
3071 *
3072 * Return: number of bytes copied to the header buffer.
3073 */
3074 static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
3075 struct libeth_fqe *buf, u32 data_len)
3076 {
3077 u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
3078 struct page *hdr_page, *buf_page;
3079 const void *src;
3080 void *dst;
3081
3082 if (unlikely(netmem_is_net_iov(buf->netmem)) ||
3083 !libeth_rx_sync_for_cpu(buf, copy))
3084 return 0;
3085
3086 hdr_page = __netmem_to_page(hdr->netmem);
3087 buf_page = __netmem_to_page(buf->netmem);
3088 dst = page_address(hdr_page) + hdr->offset +
3089 pp_page_to_nmdesc(hdr_page)->pp->p.offset;
3090 src = page_address(buf_page) + buf->offset +
3091 pp_page_to_nmdesc(buf_page)->pp->p.offset;
3092
3093 memcpy(dst, src, LARGEST_ALIGN(copy));
3094 buf->offset += copy;
3095
3096 return copy;
3097 }
3098
3099 /**
3100 * idpf_rx_build_skb - Allocate skb and populate it from header buffer
3101 * @buf: Rx buffer to pull data from
3102 * @size: the length of the packet
3103 *
3104 * This function allocates an skb. It then populates it with the page data from
3105 * the current receive descriptor, taking care to set up the skb correctly.
3106 */
3107 struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
3108 {
3109 struct page *buf_page = __netmem_to_page(buf->netmem);
3110 u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
3111 struct sk_buff *skb;
3112 void *va;
3113
3114 va = page_address(buf_page) + buf->offset;
3115 prefetch(va + hr);
3116
3117 skb = napi_build_skb(va, buf->truesize);
3118 if (unlikely(!skb))
3119 return NULL;
3120
3121 skb_mark_for_recycle(skb);
3122
3123 skb_reserve(skb, hr);
3124 __skb_put(skb, size);
3125
3126 return skb;
3127 }
3128
3129 /**
3130 * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3131 * status and error fields
3132 * @stat_err_field: field from descriptor to test bits in
3133 * @stat_err_bits: value to mask
3134 *
3135 */
3136 static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
3137 const u8 stat_err_bits)
3138 {
3139 return !!(stat_err_field & stat_err_bits);
3140 }
3141
3142 /**
3143 * idpf_rx_splitq_is_eop - process handling of EOP buffers
3144 * @rx_desc: Rx descriptor for current buffer
3145 *
3146 * If the buffer is an EOP buffer, this function returns true; otherwise it
3147 * returns false to indicate a non-EOP buffer.
3148 */
3149 static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3150 {
3151 /* if we are the last buffer then there is nothing else to do */
3152 return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
3153 IDPF_RXD_EOF_SPLITQ));
3154 }
3155
3156 /**
3157 * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3158 * @rxq: Rx descriptor queue to retrieve receive buffer queue
3159 * @budget: Total limit on number of packets to process
3160 *
3161 * This function provides a "bounce buffer" approach to Rx interrupt
3162 * processing. The advantage to this is that on systems that have
3163 * expensive overhead for IOMMU access this provides a means of avoiding
3164 * it by maintaining the mapping of the page to the system.
3165 *
3166 * Returns amount of work completed
3167 */
3168 static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
3169 {
3170 int total_rx_bytes = 0, total_rx_pkts = 0;
3171 struct idpf_buf_queue *rx_bufq = NULL;
3172 struct sk_buff *skb = rxq->skb;
3173 u16 ntc = rxq->next_to_clean;
3174
3175 /* Process Rx packets bounded by budget */
3176 while (likely(total_rx_pkts < budget)) {
3177 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
3178 struct libeth_fqe *hdr, *rx_buf = NULL;
3179 struct idpf_sw_queue *refillq = NULL;
3180 struct idpf_rxq_set *rxq_set = NULL;
3181 unsigned int pkt_len = 0;
3182 unsigned int hdr_len = 0;
3183 u16 gen_id, buf_id = 0;
3184 int bufq_id;
3185 u8 rxdid;
3186
3187 /* get the Rx desc from Rx queue based on 'next_to_clean' */
3188 rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
3189
3190 /* This memory barrier is needed to keep us from reading
3191 * any other fields out of the rx_desc
3192 */
3193 dma_rmb();
3194
3195 /* if the descriptor isn't done, no work yet to do */
3196 gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3197 VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
3198
3199 if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
3200 break;
3201
3202 rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
3203 rx_desc->rxdid_ucast);
3204 if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
3205 IDPF_RX_BUMP_NTC(rxq, ntc);
3206 u64_stats_update_begin(&rxq->stats_sync);
3207 u64_stats_inc(&rxq->q_stats.bad_descs);
3208 u64_stats_update_end(&rxq->stats_sync);
3209 continue;
3210 }
3211
3212 pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3213 VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
3214
3215 bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3216 VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
3217
3218 rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
3219 refillq = rxq_set->refillq[bufq_id];
3220
3221 /* retrieve buffer from the rxq */
3222 rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
3223
3224 buf_id = le16_to_cpu(rx_desc->buf_id);
3225
3226 rx_buf = &rx_bufq->buf[buf_id];
3227
3228 if (!rx_bufq->hdr_pp)
3229 goto payload;
3230
3231 #define __HBO_BIT VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
3232 #define __HDR_LEN_MASK VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
3233 if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
3234 /* If a header buffer overflow occurs, i.e. the header is
3235 * too large to fit in the header split buffer, HW will
3236 * put the entire packet, including headers, in the
3237 * data/payload buffer.
3238 */
3239 hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
3240 __HDR_LEN_MASK);
3241 #undef __HDR_LEN_MASK
3242 #undef __HBO_BIT
3243
3244 hdr = &rx_bufq->hdr_buf[buf_id];
3245
3246 if (unlikely(!hdr_len && !skb)) {
3247 hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
3248 /* If failed, drop both buffers by setting len to 0 */
3249 pkt_len -= hdr_len ? : pkt_len;
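/* i.e. pkt_len -= hdr_len when the workaround copied something, otherwise
 * pkt_len drops to 0 and the frame is dropped further down.
 */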
3250
3251 u64_stats_update_begin(&rxq->stats_sync);
3252 u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
3253 u64_stats_update_end(&rxq->stats_sync);
3254 }
3255
3256 if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
3257 skb = idpf_rx_build_skb(hdr, hdr_len);
3258 if (!skb)
3259 break;
3260
3261 u64_stats_update_begin(&rxq->stats_sync);
3262 u64_stats_inc(&rxq->q_stats.hsplit_pkts);
3263 u64_stats_update_end(&rxq->stats_sync);
3264 }
3265
3266 hdr->netmem = 0;
3267
3268 payload:
3269 if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
3270 goto skip_data;
3271
3272 if (skb)
3273 idpf_rx_add_frag(rx_buf, skb, pkt_len);
3274 else
3275 skb = idpf_rx_build_skb(rx_buf, pkt_len);
3276
3277 /* exit if we failed to retrieve a buffer */
3278 if (!skb)
3279 break;
3280
3281 skip_data:
3282 rx_buf->netmem = 0;
3283
3284 idpf_post_buf_refill(refillq, buf_id);
3285 IDPF_RX_BUMP_NTC(rxq, ntc);
3286
3287 /* skip if it is a non-EOP desc */
3288 if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
3289 continue;
3290
3291 /* pad skb if needed (to make valid ethernet frame) */
3292 if (eth_skb_pad(skb)) {
3293 skb = NULL;
3294 continue;
3295 }
3296
3297 /* probably a little skewed due to removing CRC */
3298 total_rx_bytes += skb->len;
3299
3300 /* protocol */
3301 if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
3302 dev_kfree_skb_any(skb);
3303 skb = NULL;
3304 continue;
3305 }
3306
3307 /* send completed skb up the stack */
3308 napi_gro_receive(rxq->napi, skb);
3309 skb = NULL;
3310
3311 /* update budget accounting */
3312 total_rx_pkts++;
3313 }
3314
3315 rxq->next_to_clean = ntc;
3316
3317 rxq->skb = skb;
3318 u64_stats_update_begin(&rxq->stats_sync);
3319 u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
3320 u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
3321 u64_stats_update_end(&rxq->stats_sync);
3322
3323 /* guarantee a trip back through this routine if there was a failure */
3324 return total_rx_pkts;
3325 }
3326
3327 /**
3328 * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3329 * @bufq: Pointer to the buffer queue
3330 * @buf_id: buffer ID
3331 * @buf_desc: Buffer queue descriptor
3332 *
3333 * Return 0 on success and negative on failure.
3334 */
3335 static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
3336 struct virtchnl2_splitq_rx_buf_desc *buf_desc)
3337 {
3338 struct libeth_fq_fp fq = {
3339 .pp = bufq->pp,
3340 .fqes = bufq->buf,
3341 .truesize = bufq->truesize,
3342 .count = bufq->desc_count,
3343 };
3344 dma_addr_t addr;
3345
3346 addr = libeth_rx_alloc(&fq, buf_id);
3347 if (addr == DMA_MAPPING_ERROR)
3348 return -ENOMEM;
3349
3350 buf_desc->pkt_addr = cpu_to_le64(addr);
3351 buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
3352
3353 if (!idpf_queue_has(HSPLIT_EN, bufq))
3354 return 0;
3355
3356 fq.pp = bufq->hdr_pp;
3357 fq.fqes = bufq->hdr_buf;
3358 fq.truesize = bufq->hdr_truesize;
3359
3360 addr = libeth_rx_alloc(&fq, buf_id);
3361 if (addr == DMA_MAPPING_ERROR)
3362 return -ENOMEM;
3363
3364 buf_desc->hdr_addr = cpu_to_le64(addr);
3365
3366 return 0;
3367 }
3368
3369 /**
3370 * idpf_rx_clean_refillq - Clean refill queue buffers
3371 * @bufq: buffer queue to post buffers back to
3372 * @refillq: refill queue to clean
3373 *
3374 * This function takes care of the buffer refill management
3375 */
3376 static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
3377 struct idpf_sw_queue *refillq)
3378 {
3379 struct virtchnl2_splitq_rx_buf_desc *buf_desc;
3380 u16 bufq_nta = bufq->next_to_alloc;
3381 u16 ntc = refillq->next_to_clean;
3382 int cleaned = 0;
3383
3384 buf_desc = &bufq->split_buf[bufq_nta];
3385
3386 /* make sure we stop at ring wrap in the unlikely case ring is full */
3387 while (likely(cleaned < refillq->desc_count)) {
3388 u32 buf_id, refill_desc = refillq->ring[ntc];
3389 bool failure;
3390
3391 if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
3392 !!(refill_desc & IDPF_RFL_BI_GEN_M))
3393 break;
3394
3395 buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
3396 failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
3397 if (failure)
3398 break;
3399
3400 if (unlikely(++ntc == refillq->desc_count)) {
3401 idpf_queue_change(RFL_GEN_CHK, refillq);
3402 ntc = 0;
3403 }
3404
3405 if (unlikely(++bufq_nta == bufq->desc_count)) {
3406 buf_desc = &bufq->split_buf[0];
3407 bufq_nta = 0;
3408 } else {
3409 buf_desc++;
3410 }
3411
3412 cleaned++;
3413 }
3414
3415 if (!cleaned)
3416 return;
3417
3418 /* We want to limit how many transactions on the bus we trigger with
3419 * tail writes so we only do it in strides. It's also important we
3420 * align the write to a multiple of 8 as required by HW.
3421 */
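/* e.g. a 4096-entry ring with next_to_use = 4090 and bufq_nta = 6 has
 * 4096 + 6 - 4090 = 12 freshly filled slots; tail is written only once this
 * count reaches IDPF_RX_BUF_POST_STRIDE, and then only up to a stride-aligned
 * index.
 */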
3422 if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
3423 bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
3424 idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
3425 IDPF_RX_BUF_POST_STRIDE));
3426
3427 /* update next to alloc since we have filled the ring */
3428 refillq->next_to_clean = ntc;
3429 bufq->next_to_alloc = bufq_nta;
3430 }
3431
3432 /**
3433 * idpf_rx_clean_refillq_all - Clean all refill queues
3434 * @bufq: buffer queue with refill queues
3435 * @nid: ID of the closest NUMA node with memory
3436 *
3437 * Iterates through all refill queues assigned to the buffer queue owned by
3438 * this vector and posts any buffer IDs returned by the Rx cleaning path back
3439 * to the hardware buffer queue.
3440 */
3441 static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
3442 {
3443 struct idpf_bufq_set *bufq_set;
3444 int i;
3445
3446 page_pool_nid_changed(bufq->pp, nid);
3447 if (bufq->hdr_pp)
3448 page_pool_nid_changed(bufq->hdr_pp, nid);
3449
3450 bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
3451 for (i = 0; i < bufq_set->num_refillqs; i++)
3452 idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
3453 }
3454
3455 /**
3456 * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
3457 * @irq: interrupt number
3458 * @data: pointer to a q_vector
3459 *
3460 */
3461 static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
3462 void *data)
3463 {
3464 struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
3465
3466 q_vector->total_events++;
3467 napi_schedule(&q_vector->napi);
3468
3469 return IRQ_HANDLED;
3470 }
3471
3472 /**
3473 * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3474 * @vport: virtual port structure
3475 *
3476 */
3477 static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
3478 {
3479 u16 v_idx;
3480
3481 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3482 netif_napi_del(&vport->q_vectors[v_idx].napi);
3483 }
3484
3485 /**
3486 * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3487 * @vport: main vport structure
3488 */
3489 static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
3490 {
3491 int v_idx;
3492
3493 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3494 napi_disable(&vport->q_vectors[v_idx].napi);
3495 }
3496
3497 /**
3498 * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3499 * @vport: virtual port
3500 *
3501 * Free the memory allocated for interrupt vectors associated to a vport
3502 */
3503 void idpf_vport_intr_rel(struct idpf_vport *vport)
3504 {
3505 for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
3506 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
3507
3508 kfree(q_vector->complq);
3509 q_vector->complq = NULL;
3510 kfree(q_vector->bufq);
3511 q_vector->bufq = NULL;
3512 kfree(q_vector->tx);
3513 q_vector->tx = NULL;
3514 kfree(q_vector->rx);
3515 q_vector->rx = NULL;
3516 }
3517
3518 kfree(vport->q_vectors);
3519 vport->q_vectors = NULL;
3520 }
3521
3522 /**
3523 * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3524 * @vport: main vport structure
3525 */
3526 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
3527 {
3528 struct idpf_adapter *adapter = vport->adapter;
3529 int vector;
3530
3531 for (vector = 0; vector < vport->num_q_vectors; vector++) {
3532 struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3533 int irq_num, vidx;
3534
3535 /* free only the irqs that were actually requested */
3536 if (!q_vector)
3537 continue;
3538
3539 vidx = vport->q_vector_idxs[vector];
3540 irq_num = adapter->msix_entries[vidx].vector;
3541
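/* free_irq() returns the device name string that was handed to request_irq();
 * the name is allocated dynamically when the vector is requested, which is
 * why it is kfree()'d here.
 */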
3542 kfree(free_irq(irq_num, q_vector));
3543 }
3544 }
3545
3546 /**
3547 * idpf_vport_intr_dis_irq_all - Disable all interrupts
3548 * @vport: main vport structure
3549 */
3550 static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
3551 {
3552 struct idpf_q_vector *q_vector = vport->q_vectors;
3553 int q_idx;
3554
3555 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
3556 writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
3557 }
3558
3559 /**
3560 * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
3561 * @q_vector: pointer to q_vector
3562 */
3563 static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
3564 {
3565 u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
3566 int type = IDPF_NO_ITR_UPDATE_IDX;
3567 u16 itr = 0;
3568
3569 if (q_vector->wb_on_itr) {
3570 /*
3571 * Trigger a software interrupt when exiting wb_on_itr, to make
3572 * sure we catch any pending write backs that might have been
3573 * missed due to interrupt state transition.
3574 */
3575 itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
3576 q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
3577 type = IDPF_SW_ITR_UPDATE_IDX;
3578 itr = IDPF_ITR_20K;
3579 }
3580
3581 itr &= IDPF_ITR_MASK;
3582 /* Don't clear PBA because that can cause lost interrupts that
3583 * came in while we were cleaning/polling
3584 */
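	/* The ITR update index is placed at dyn_ctl_itridx_s, while the
	 * interval is shifted by (dyn_ctl_intrvl_s - 1) rather than the full
	 * field offset. As in related Intel drivers (e.g. ice), this is
	 * presumably because the register interval field is in 2-usec units,
	 * so the one-bit-short shift divides the microsecond value by two.
	 */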
	itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
		   (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));

	return itr_val;
}

/**
 * idpf_update_dim_sample - Update dim sample with packets and bytes
 * @q_vector: the vector associated with the interrupt
 * @dim_sample: dim sample to update
 * @dim: dim instance structure
 * @packets: total packets
 * @bytes: total bytes
 *
 * Update the dim sample with the packets and bytes which are passed to this
 * function. Set the dim state appropriately if the dim settings get stale.
 */
static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
				   struct dim_sample *dim_sample,
				   struct dim *dim, u64 packets, u64 bytes)
{
	dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
	dim_sample->comp_ctr = 0;

	/* if dim settings get stale, like when not updated for 1 second or
	 * longer, force it to start again. This addresses the frequent case
	 * of an idle queue being switched to by the scheduler.
	 */
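	/* Note: ktime_ms_delta() returns milliseconds while HZ is ticks per
	 * second, so the "1 second" threshold above holds exactly only when
	 * HZ == 1000; with a smaller HZ the restart happens sooner.
	 */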
	if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ)
		dim->state = DIM_START_MEASURE;
}

/**
 * idpf_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR value based on incoming packets, bytes, and interrupts.
 *
 * This function is a no-op if the queue is not configured to dynamic ITR.
 */
static void idpf_net_dim(struct idpf_q_vector *q_vector)
{
	struct dim_sample dim_sample = { };
	u64 packets, bytes;
	u32 i;

	if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
		goto check_rx_itr;

	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
		struct idpf_tx_queue *txq = q_vector->tx[i];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&txq->stats_sync);
			packets += u64_stats_read(&txq->q_stats.packets);
			bytes += u64_stats_read(&txq->q_stats.bytes);
		} while (u64_stats_fetch_retry(&txq->stats_sync, start));
	}

	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
			       packets, bytes);
	net_dim(&q_vector->tx_dim, &dim_sample);

check_rx_itr:
	if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
		return;

	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
		struct idpf_rx_queue *rxq = q_vector->rx[i];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&rxq->stats_sync);
			packets += u64_stats_read(&rxq->q_stats.packets);
			bytes += u64_stats_read(&rxq->q_stats.bytes);
		} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
	}

	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
			       packets, bytes);
	net_dim(&q_vector->rx_dim, &dim_sample);
}

/**
 * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 *
 * Update the net_dim() algorithm and re-enable the interrupt associated with
 * this vector.
 */
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
{
	u32 intval;

	/* net_dim() updates ITR out-of-band using a work item */
	idpf_net_dim(q_vector);

	intval = idpf_vport_intr_buildreg_itr(q_vector);
	q_vector->wb_on_itr = false;

	writel(intval, q_vector->intr_reg.dyn_ctl);
}

/**
 * idpf_vport_intr_req_irq - Request IRQs for the vport's MSI-X vectors
 * @vport: main vport structure
 */
static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	const char *drv_name, *if_name, *vec_name;
	int vector, err, irq_num, vidx;

	drv_name = dev_driver_string(&adapter->pdev->dev);
	if_name = netdev_name(vport->netdev);

	for (vector = 0; vector < vport->num_q_vectors; vector++) {
		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
		char *name;

		vidx = vport->q_vector_idxs[vector];
		irq_num = adapter->msix_entries[vidx].vector;

		if (q_vector->num_rxq && q_vector->num_txq)
			vec_name = "TxRx";
		else if (q_vector->num_rxq)
			vec_name = "Rx";
		else if (q_vector->num_txq)
			vec_name = "Tx";
		else
			continue;

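		/* The resulting IRQ name has the form
		 * "<driver>-<netdev>-<type>-<vector idx>", e.g. something
		 * like "idpf-eth0-TxRx-3" (interface name and index are
		 * illustrative).
		 */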
		name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
				 vec_name, vidx);

		err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
				  name, q_vector);
		if (err) {
			netdev_err(vport->netdev,
				   "Request_irq failed, error: %d\n", err);
			/* request_irq() did not take ownership of @name */
			kfree(name);
			goto free_q_irqs;
		}
	}

	return 0;

free_q_irqs:
	while (--vector >= 0) {
		vidx = vport->q_vector_idxs[vector];
		irq_num = adapter->msix_entries[vidx].vector;
		kfree(free_irq(irq_num, &vport->q_vectors[vector]));
	}

	return err;
}

/**
 * idpf_vport_intr_write_itr - Write ITR value to the ITR register
 * @q_vector: q_vector structure
 * @itr: Interrupt throttling rate
 * @tx: Tx or Rx ITR
 */
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
{
	struct idpf_intr_reg *intr_reg;

	if (tx && !q_vector->tx)
		return;
	else if (!tx && !q_vector->rx)
		return;

	intr_reg = &q_vector->intr_reg;
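	/* ITR_REG_ALIGN() masks the microsecond value down to the ITR
	 * granularity and the IDPF_ITR_GRAN_S shift converts it to the
	 * hardware interval units expected by the Tx/Rx ITR registers
	 * (exact granularity per the driver's ITR definitions).
	 */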
	writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
	       tx ? intr_reg->tx_itr : intr_reg->rx_itr);
}

/**
 * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
 * @vport: main vport structure
 */
static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
{
	bool dynamic;
	int q_idx;
	u16 itr;

	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
		struct idpf_q_vector *qv = &vport->q_vectors[q_idx];

		/* Set the initial ITR values */
		if (qv->num_txq) {
			dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
			itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
			idpf_vport_intr_write_itr(qv, dynamic ?
						  itr : qv->tx_itr_value,
						  true);
		}

		if (qv->num_rxq) {
			dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
			itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
			idpf_vport_intr_write_itr(qv, dynamic ?
						  itr : qv->rx_itr_value,
						  false);
		}

		if (qv->num_txq || qv->num_rxq)
			idpf_vport_intr_update_itr_ena_irq(qv);
	}
}

/**
 * idpf_vport_intr_deinit - Release all vector associations for the vport
 * @vport: main vport structure
 */
void idpf_vport_intr_deinit(struct idpf_vport *vport)
{
	idpf_vport_intr_dis_irq_all(vport);
	idpf_vport_intr_napi_dis_all(vport);
	idpf_vport_intr_napi_del_all(vport);
	idpf_vport_intr_rel_irq(vport);
}

/**
 * idpf_tx_dim_work - Apply a new Tx ITR value chosen by the DIM algorithm
 * @work: work queue structure
 */
static void idpf_tx_dim_work(struct work_struct *work)
{
	struct idpf_q_vector *q_vector;
	struct idpf_vport *vport;
	struct dim *dim;
	u16 itr;

	dim = container_of(work, struct dim, work);
	q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
	vport = q_vector->vport;

	if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
		dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;

	/* look up the values in our local table */
	itr = vport->tx_itr_profile[dim->profile_ix];

	idpf_vport_intr_write_itr(q_vector, itr, true);

	dim->state = DIM_START_MEASURE;
}

/**
 * idpf_rx_dim_work - Apply a new Rx ITR value chosen by the DIM algorithm
 * @work: work queue structure
 */
static void idpf_rx_dim_work(struct work_struct *work)
{
	struct idpf_q_vector *q_vector;
	struct idpf_vport *vport;
	struct dim *dim;
	u16 itr;

	dim = container_of(work, struct dim, work);
	q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
	vport = q_vector->vport;

	if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
		dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;

	/* look up the values in our local table */
	itr = vport->rx_itr_profile[dim->profile_ix];

	idpf_vport_intr_write_itr(q_vector, itr, false);

	dim->state = DIM_START_MEASURE;
}

/**
 * idpf_init_dim - Set up dynamic interrupt moderation
 * @qv: q_vector structure
 */
static void idpf_init_dim(struct idpf_q_vector *qv)
{
	INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
	qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;

	INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
	qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
}
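
/* Dynamic interrupt moderation flow, as wired up above: the NAPI poll path
 * calls idpf_net_dim(), which feeds per-queue packet/byte counters to
 * net_dim(); when the library picks a new profile it schedules the tx_dim /
 * rx_dim work items, and idpf_tx_dim_work() / idpf_rx_dim_work() translate
 * the chosen profile index into an ITR value and write it to hardware.
 */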

/**
 * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
 * @vport: main vport structure
 */
static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
{
	int q_idx;

	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
		struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];

		idpf_init_dim(q_vector);
		napi_enable(&q_vector->napi);
	}
}

/**
 * idpf_tx_splitq_clean_all - Clean completion queues
 * @q_vec: queue vector
 * @budget: Used to determine if we are in netpoll
 * @cleaned: returns number of packets cleaned
 *
 * Returns true if the clean completed within budget, false otherwise.
 */
static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
				     int budget, int *cleaned)
{
	u16 num_complq = q_vec->num_complq;
	bool clean_complete = true;
	int i, budget_per_q;

	if (unlikely(!num_complq))
		return true;

	budget_per_q = DIV_ROUND_UP(budget, num_complq);

	for (i = 0; i < num_complq; i++)
		clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
						       budget_per_q, cleaned);

	return clean_complete;
}

/**
 * idpf_rx_splitq_clean_all - Clean all Rx queues serviced by a vector
 * @q_vec: queue vector
 * @budget: Used to determine if we are in netpoll
 * @cleaned: returns number of packets cleaned
 *
 * Returns true if the clean completed within budget, false otherwise.
 */
static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
				     int *cleaned)
{
	u16 num_rxq = q_vec->num_rxq;
	bool clean_complete = true;
	int pkts_cleaned = 0;
	int i, budget_per_q;
	int nid;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
	for (i = 0; i < num_rxq; i++) {
		struct idpf_rx_queue *rxq = q_vec->rx[i];
		int pkts_cleaned_per_q;

		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
		/* if we clean as many as budgeted, we must not be done */
		if (pkts_cleaned_per_q >= budget_per_q)
			clean_complete = false;
		pkts_cleaned += pkts_cleaned_per_q;
	}
	*cleaned = pkts_cleaned;

	nid = numa_mem_id();

	for (i = 0; i < q_vec->num_bufq; i++)
		idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);

	return clean_complete;
}

/**
 * idpf_vport_splitq_napi_poll - NAPI handler
 * @napi: struct from which you get q_vector
 * @budget: budget provided by stack
 */
static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
{
	struct idpf_q_vector *q_vector =
				container_of(napi, struct idpf_q_vector, napi);
	bool clean_complete;
	int work_done = 0;

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(!budget)) {
		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);

		return 0;
	}

	clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		idpf_vport_intr_set_wb_on_itr(q_vector);
		return budget;
	}

	/* Switch to poll mode in the tear-down path after sending disable
	 * queues virtchnl message, as the interrupts will be disabled after
	 * that.
	 */
	if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
							 q_vector->tx[0])))
		return budget;

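	/* NAPI contract: a poll routine may only call napi_complete_done()
	 * when it returns strictly less than the budget, so clamp work_done
	 * to budget - 1 before completing.
	 */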
	work_done = min_t(int, work_done, budget - 1);

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		idpf_vport_intr_update_itr_ena_irq(q_vector);
	else
		idpf_vport_intr_set_wb_on_itr(q_vector);

	return work_done;
}

/**
 * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
 * @vport: virtual port
 *
 * Assign the vport's Rx, buffer, Tx and completion queues to its q_vectors
 * in a round-robin fashion.
 */
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
{
	bool split = idpf_is_queue_model_split(vport->rxq_model);
	u16 num_txq_grp = vport->num_txq_grp;
	struct idpf_rxq_group *rx_qgrp;
	struct idpf_txq_group *tx_qgrp;
	u32 i, qv_idx, q_index;

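	/* Queue groups are spread across the vectors round-robin: qv_idx
	 * advances per group and wraps at num_q_vectors, so e.g. with 4
	 * vectors and 8 Rx queue groups, groups 0-7 land on vectors
	 * 0, 1, 2, 3, 0, 1, 2, 3 (counts illustrative).
	 */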
	for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
		u16 num_rxq;

		if (qv_idx >= vport->num_q_vectors)
			qv_idx = 0;

		rx_qgrp = &vport->rxq_grps[i];
		if (split)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;

			if (split)
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];
			q->q_vector = &vport->q_vectors[qv_idx];
			q_index = q->q_vector->num_rxq;
			q->q_vector->rx[q_index] = q;
			q->q_vector->num_rxq++;

			if (split)
				q->napi = &q->q_vector->napi;
		}

		if (split) {
			for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
				struct idpf_buf_queue *bufq;

				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
				bufq->q_vector = &vport->q_vectors[qv_idx];
				q_index = bufq->q_vector->num_bufq;
				bufq->q_vector->bufq[q_index] = bufq;
				bufq->q_vector->num_bufq++;
			}
		}

		qv_idx++;
	}

	split = idpf_is_queue_model_split(vport->txq_model);

	for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
		u16 num_txq;

		if (qv_idx >= vport->num_q_vectors)
			qv_idx = 0;

		tx_qgrp = &vport->txq_grps[i];
		num_txq = tx_qgrp->num_txq;

		for (u32 j = 0; j < num_txq; j++) {
			struct idpf_tx_queue *q;

			q = tx_qgrp->txqs[j];
			q->q_vector = &vport->q_vectors[qv_idx];
			q->q_vector->tx[q->q_vector->num_txq++] = q;
		}

		if (split) {
			struct idpf_compl_queue *q = tx_qgrp->complq;

			q->q_vector = &vport->q_vectors[qv_idx];
			q->q_vector->complq[q->q_vector->num_complq++] = q;
		}

		qv_idx++;
	}
}

/**
 * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
 * @vport: virtual port
 *
 * Initialize vector indexes with values returned over mailbox
 */
static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_alloc_vectors *ac;
	u16 *vecids, total_vecs;
	int i;

	ac = adapter->req_vec_chunks;
	if (!ac) {
		for (i = 0; i < vport->num_q_vectors; i++)
			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];

		return 0;
	}

	total_vecs = idpf_get_reserved_vecs(adapter);
	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
	if (!vecids)
		return -ENOMEM;

	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);

	for (i = 0; i < vport->num_q_vectors; i++)
		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];

	kfree(vecids);

	return 0;
}

/**
 * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
 * @vport: virtual port structure
 */
static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
{
	int (*napi_poll)(struct napi_struct *napi, int budget);
	u16 v_idx, qv_idx;
	int irq_num;

	if (idpf_is_queue_model_split(vport->txq_model))
		napi_poll = idpf_vport_splitq_napi_poll;
	else
		napi_poll = idpf_vport_singleq_napi_poll;

	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
		qv_idx = vport->q_vector_idxs[v_idx];
		irq_num = vport->adapter->msix_entries[qv_idx].vector;

		netif_napi_add_config(vport->netdev, &q_vector->napi,
				      napi_poll, v_idx);
		netif_napi_set_irq(&q_vector->napi, irq_num);
	}
}

/**
 * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
 * @vport: virtual port
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
	struct idpf_vport_user_config_data *user_config;
	struct idpf_q_vector *q_vector;
	struct idpf_q_coalesce *q_coal;
	u32 complqs_per_vector, v_idx;
	u16 idx = vport->idx;

	user_config = &vport->adapter->vport_config[idx]->user_config;
	vport->q_vectors = kcalloc(vport->num_q_vectors,
				   sizeof(struct idpf_q_vector), GFP_KERNEL);
	if (!vport->q_vectors)
		return -ENOMEM;

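	/* Size each vector's queue arrays for the worst case of the
	 * round-robin mapping done in idpf_vport_intr_map_vector_to_qs():
	 * with G queue groups spread over V vectors, one vector can be
	 * assigned at most DIV_ROUND_UP(G, V) groups, and buffer queues
	 * scale by num_bufqs_per_qgrp per group.
	 */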
	txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
				       vport->num_q_vectors);
	rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
				       vport->num_q_vectors);
	bufqs_per_vector = vport->num_bufqs_per_qgrp *
			   DIV_ROUND_UP(vport->num_rxq_grp,
					vport->num_q_vectors);
	complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
					  vport->num_q_vectors);

	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
		q_vector = &vport->q_vectors[v_idx];
		q_coal = &user_config->q_coalesce[v_idx];
		q_vector->vport = vport;

		q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
		q_vector->tx_intr_mode = q_coal->tx_intr_mode;
		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;

		q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
		q_vector->rx_intr_mode = q_coal->rx_intr_mode;
		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;

		q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
				       GFP_KERNEL);
		if (!q_vector->tx)
			goto error;

		q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
				       GFP_KERNEL);
		if (!q_vector->rx)
			goto error;

		if (!idpf_is_queue_model_split(vport->rxq_model))
			continue;

		q_vector->bufq = kcalloc(bufqs_per_vector,
					 sizeof(*q_vector->bufq),
					 GFP_KERNEL);
		if (!q_vector->bufq)
			goto error;

		q_vector->complq = kcalloc(complqs_per_vector,
					   sizeof(*q_vector->complq),
					   GFP_KERNEL);
		if (!q_vector->complq)
			goto error;
	}

	return 0;

error:
	idpf_vport_intr_rel(vport);

	return -ENOMEM;
}

/**
 * idpf_vport_intr_init - Setup all vectors for the given vport
 * @vport: virtual port
 *
 * Returns 0 on success or negative on failure
 */
int idpf_vport_intr_init(struct idpf_vport *vport)
{
	int err;

	err = idpf_vport_intr_init_vec_idx(vport);
	if (err)
		return err;

	idpf_vport_intr_map_vector_to_qs(vport);
	idpf_vport_intr_napi_add_all(vport);

	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
	if (err)
		goto unroll_vectors_alloc;

	err = idpf_vport_intr_req_irq(vport);
	if (err)
		goto unroll_vectors_alloc;

	return 0;

unroll_vectors_alloc:
	idpf_vport_intr_napi_del_all(vport);

	return err;
}

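/**
 * idpf_vport_intr_ena - Enable NAPI and interrupts for the given vport
 * @vport: virtual port structure
 */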
void idpf_vport_intr_ena(struct idpf_vport *vport)
{
	idpf_vport_intr_napi_ena_all(vport);
	idpf_vport_intr_ena_irq_all(vport);
}

/**
 * idpf_config_rss - Send virtchnl messages to configure RSS
 * @vport: virtual port
 *
 * Return 0 on success, negative on failure
 */
int idpf_config_rss(struct idpf_vport *vport)
{
	int err;

	err = idpf_send_get_set_rss_key_msg(vport, false);
	if (err)
		return err;

	return idpf_send_get_set_rss_lut_msg(vport, false);
}

/**
 * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
 * @vport: virtual port structure
 */
static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	u16 num_active_rxq = vport->num_rxq;
	struct idpf_rss_data *rss_data;
	int i;

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;

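	/* The default LUT simply cycles through the active Rx queues, e.g.
	 * with 4 active queues the table reads 0, 1, 2, 3, 0, 1, 2, 3, ...
	 * (queue count illustrative), spreading flows evenly across them.
	 */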
	for (i = 0; i < rss_data->rss_lut_size; i++) {
		rss_data->rss_lut[i] = i % num_active_rxq;
		rss_data->cached_lut[i] = rss_data->rss_lut[i];
	}
}

/**
 * idpf_init_rss - Allocate and initialize RSS resources
 * @vport: virtual port
 *
 * Return 0 on success, negative on failure
 */
int idpf_init_rss(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_rss_data *rss_data;
	u32 lut_size;

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;

	lut_size = rss_data->rss_lut_size * sizeof(u32);
	rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
	if (!rss_data->rss_lut)
		return -ENOMEM;

	rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
	if (!rss_data->cached_lut) {
		kfree(rss_data->rss_lut);
		rss_data->rss_lut = NULL;

		return -ENOMEM;
	}

	/* Fill the default RSS lut values */
	idpf_fill_dflt_rss_lut(vport);

	return idpf_config_rss(vport);
}

/**
 * idpf_deinit_rss - Release RSS resources
 * @vport: virtual port
 */
void idpf_deinit_rss(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_rss_data *rss_data;

	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
	kfree(rss_data->cached_lut);
	rss_data->cached_lut = NULL;
	kfree(rss_data->rss_lut);
	rss_data->rss_lut = NULL;
}
