Lines Matching +full:rx +full:- +full:shared

4  * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
23 * Maintained by: pv-drivers@vmware.com
81 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_enable_all_intrs()
84 !adapter->queuesExtEnabled) { in vmxnet3_enable_all_intrs()
85 adapter->shared->devRead.intrConf.intrCtrl &= in vmxnet3_enable_all_intrs()
88 adapter->shared->devReadExt.intrConfExt.intrCtrl &= in vmxnet3_enable_all_intrs()
100 !adapter->queuesExtEnabled) { in vmxnet3_disable_all_intrs()
101 adapter->shared->devRead.intrConf.intrCtrl |= in vmxnet3_disable_all_intrs()
104 adapter->shared->devReadExt.intrConfExt.intrCtrl |= in vmxnet3_disable_all_intrs()
107 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_disable_all_intrs()
122 return tq->stopped; in vmxnet3_tq_stopped()
129 tq->stopped = false; in vmxnet3_tq_start()
130 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); in vmxnet3_tq_start()
137 tq->stopped = false; in vmxnet3_tq_wake()
138 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_wake()
145 tq->stopped = true; in vmxnet3_tq_stop()
146 tq->num_stop++; in vmxnet3_tq_stop()
147 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_stop()
165 if (tq->tsPktCount == 1) { in vmxnet3_apply_timestamp()
167 tq->tsPktCount = rate; in vmxnet3_apply_timestamp()
170 tq->tsPktCount--; in vmxnet3_apply_timestamp()
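The tsPktCount lines above (165-170) suggest a simple 1-in-N sampling scheme for latency timestamps. Below is a hedged, user-space sketch of that down-counter idea -- not the driver function itself; the names and the exact branch layout are assumptions.

/* Hedged sketch: sample roughly one packet in every `rate`. */
#include <stdio.h>

static unsigned int ts_pkt_count = 1;      /* stands in for tq->tsPktCount */

static int apply_timestamp(unsigned int rate)
{
    if (ts_pkt_count == 1) {               /* counter expired: sample this pkt */
        ts_pkt_count = rate;               /* reload to the sample rate */
        return 1;
    }
    ts_pkt_count--;                        /* otherwise just count down */
    return 0;
}

int main(void)
{
    /* With rate = 4, every 4th packet carries a timestamp. */
    for (int i = 0; i < 10; i++)
        printf("pkt %d: ts=%d\n", i, apply_timestamp(4));
    return 0;
}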
201 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_check_link()
204 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_check_link()
206 adapter->link_speed = ret >> 16; in vmxnet3_check_link()
214 adapter->link_speed < 10000) in vmxnet3_check_link()
215 adapter->link_speed = adapter->link_speed * 1000; in vmxnet3_check_link()
216 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n", in vmxnet3_check_link()
217 adapter->link_speed); in vmxnet3_check_link()
218 netif_carrier_on(adapter->netdev); in vmxnet3_check_link()
221 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_check_link()
222 vmxnet3_tq_start(&adapter->tx_queue[i], in vmxnet3_check_link()
226 netdev_info(adapter->netdev, "NIC Link is Down\n"); in vmxnet3_check_link()
227 netif_carrier_off(adapter->netdev); in vmxnet3_check_link()
230 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_check_link()
231 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter); in vmxnet3_check_link()
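vmxnet3_check_link() shifts the GET_LINK command result right by 16 (line 206) and, on newer devices, scales speeds below 10000 by 1000 (lines 214-215). A hedged stand-alone sketch of that decoding follows; the bit-0 link-up test and the "newer devices report Gbps" reading are inferred from the surrounding lines, not quoted.

#include <stdint.h>
#include <stdio.h>

static void decode_link(uint32_t ret, int device_reports_gbps)
{
    uint32_t speed = ret >> 16;            /* upper half: speed */
    int up = ret & 1;                      /* assumption: bit 0 = carrier up */

    if (up && device_reports_gbps && speed < 10000)
        speed *= 1000;                     /* normalise Gbps -> Mbps, as on line 215 */

    printf("link %s, %u Mbps\n", up ? "up" : "down", up ? speed : 0);
}

int main(void)
{
    decode_link((10u << 16) | 1, 1);       /* 10 Gbps device, link up */
    decode_link(0, 1);                     /* link down */
    return 0;
}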
241 u32 events = le32_to_cpu(adapter->shared->ecr); in vmxnet3_process_events()
253 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_process_events()
256 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_process_events()
258 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_process_events()
259 if (adapter->tqd_start[i].status.stopped) in vmxnet3_process_events()
260 dev_err(&adapter->netdev->dev, in vmxnet3_process_events()
262 adapter->netdev->name, i, le32_to_cpu( in vmxnet3_process_events()
263 adapter->tqd_start[i].status.error)); in vmxnet3_process_events()
264 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_process_events()
265 if (adapter->rqd_start[i].status.stopped) in vmxnet3_process_events()
266 dev_err(&adapter->netdev->dev, in vmxnet3_process_events()
268 adapter->netdev->name, i, in vmxnet3_process_events()
269 adapter->rqd_start[i].status.error); in vmxnet3_process_events()
271 schedule_work(&adapter->work); in vmxnet3_process_events()
277 * The device expects the bitfields in shared structures to be written in little endian.
285 * In order to avoid touching bits in shared structure more than once, temporary descriptors are used.
293 dstDesc->addr = le64_to_cpu(srcDesc->addr); in vmxnet3_RxDescToCPU()
295 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1); in vmxnet3_RxDescToCPU()
306 for (i = 2; i > 0; i--) { in vmxnet3_TxDescToLe()
307 src--; in vmxnet3_TxDescToLe()
308 dst--; in vmxnet3_TxDescToLe()
332 u32 mask = ((1 << size) - 1) << pos; in get_bitfield32()
366 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
367 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
368 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
369 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
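The helpers above exist because the device reads the shared descriptors in little endian: each access goes through a temporary copy plus a mask-and-shift, as the get_bitfield32() mask on line 332 shows. A minimal, hedged user-space version of that extraction pattern follows; the field positions are illustrative and le32_to_cpu() is stubbed for a little-endian host.

#include <stdint.h>
#include <stdio.h>

#define le32_to_cpu(x)  (x)                /* assumption: little-endian host */

static uint32_t get_bitfield32(const uint32_t *bitfield, uint32_t pos, uint32_t size)
{
    uint32_t temp = le32_to_cpu(*bitfield);        /* read the word once */
    uint32_t mask = ((1u << size) - 1) << pos;     /* same mask as line 332 */

    return (temp & mask) >> pos;
}

int main(void)
{
    /* Illustrative layout: 14-bit length at bit 0, generation bit at bit 31. */
    uint32_t dword2 = (1u << 31) | 0x5dc;

    printf("gen=%u len=%u\n",
           get_bitfield32(&dword2, 31, 1),
           get_bitfield32(&dword2, 0, 14));
    return 0;
}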
380 u32 map_type = tbi->map_type; in vmxnet3_unmap_tx_buf()
383 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, in vmxnet3_unmap_tx_buf()
386 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, in vmxnet3_unmap_tx_buf()
391 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */ in vmxnet3_unmap_tx_buf()
405 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); in vmxnet3_unmap_pkt()
406 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1); in vmxnet3_unmap_pkt()
408 tbi = &tq->buf_info[eop_idx]; in vmxnet3_unmap_pkt()
409 BUG_ON(!tbi->skb); in vmxnet3_unmap_pkt()
410 map_type = tbi->map_type; in vmxnet3_unmap_pkt()
411 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); in vmxnet3_unmap_pkt()
413 while (tq->tx_ring.next2comp != eop_idx) { in vmxnet3_unmap_pkt()
414 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, in vmxnet3_unmap_pkt()
419 * that the tx routine incorrectly re-queues a pkt due to in vmxnet3_unmap_pkt()
422 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_unmap_pkt()
427 xdp_return_frame_bulk(tbi->xdpf, bq); in vmxnet3_unmap_pkt()
429 dev_kfree_skb_any(tbi->skb); in vmxnet3_unmap_pkt()
432 tbi->skb = NULL; in vmxnet3_unmap_pkt()
449 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
450 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { in vmxnet3_tq_tx_complete()
451 /* Prevent any &gdesc->tcd field from being (speculatively) in vmxnet3_tq_tx_complete()
452 * read before (&gdesc->tcd)->gen is read. in vmxnet3_tq_tx_complete()
457 &gdesc->tcd), tq, adapter->pdev, in vmxnet3_tq_tx_complete()
460 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); in vmxnet3_tq_tx_complete()
461 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
467 spin_lock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
469 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > in vmxnet3_tq_tx_complete()
471 netif_carrier_ok(adapter->netdev))) { in vmxnet3_tq_tx_complete()
474 spin_unlock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
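The wake-up test on line 469 compares against vmxnet3_cmd_ring_desc_avail(), whose body lives elsewhere in the driver. A hedged sketch of that free-slot arithmetic: the ring keeps one descriptor unused so that next2fill == next2comp always means an empty ring.

#include <stdio.h>

struct cmd_ring {
    unsigned int size;
    unsigned int next2fill;                /* producer index */
    unsigned int next2comp;                /* consumer index */
};

static int ring_desc_avail(const struct cmd_ring *ring)
{
    return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
           ring->next2comp - ring->next2fill - 1;
}

int main(void)
{
    struct cmd_ring r = { .size = 512, .next2fill = 500, .next2comp = 10 };

    /* 10 + 512 - 500 - 1 = 21 slots free before the producer would catch
     * up with the consumer. */
    printf("avail = %d\n", ring_desc_avail(&r));
    return 0;
}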
491 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { in vmxnet3_tq_cleanup()
494 tbi = tq->buf_info + tq->tx_ring.next2comp; in vmxnet3_tq_cleanup()
495 map_type = tbi->map_type; in vmxnet3_tq_cleanup()
497 vmxnet3_unmap_tx_buf(tbi, adapter->pdev); in vmxnet3_tq_cleanup()
498 if (tbi->skb) { in vmxnet3_tq_cleanup()
500 xdp_return_frame_bulk(tbi->xdpf, &bq); in vmxnet3_tq_cleanup()
502 dev_kfree_skb_any(tbi->skb); in vmxnet3_tq_cleanup()
503 tbi->skb = NULL; in vmxnet3_tq_cleanup()
505 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_tq_cleanup()
512 for (i = 0; i < tq->tx_ring.size; i++) in vmxnet3_tq_cleanup()
513 BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE); in vmxnet3_tq_cleanup()
515 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
516 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_cleanup()
518 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
519 tq->comp_ring.next2proc = 0; in vmxnet3_tq_cleanup()
527 if (tq->tx_ring.base) { in vmxnet3_tq_destroy()
528 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * in vmxnet3_tq_destroy()
530 tq->tx_ring.base, tq->tx_ring.basePA); in vmxnet3_tq_destroy()
531 tq->tx_ring.base = NULL; in vmxnet3_tq_destroy()
533 if (tq->data_ring.base) { in vmxnet3_tq_destroy()
534 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_tq_destroy()
535 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_destroy()
536 tq->data_ring.base, tq->data_ring.basePA); in vmxnet3_tq_destroy()
537 tq->data_ring.base = NULL; in vmxnet3_tq_destroy()
539 if (tq->ts_ring.base) { in vmxnet3_tq_destroy()
540 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_tq_destroy()
541 tq->tx_ring.size * tq->tx_ts_desc_size, in vmxnet3_tq_destroy()
542 tq->ts_ring.base, tq->ts_ring.basePA); in vmxnet3_tq_destroy()
543 tq->ts_ring.base = NULL; in vmxnet3_tq_destroy()
545 if (tq->comp_ring.base) { in vmxnet3_tq_destroy()
546 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * in vmxnet3_tq_destroy()
548 tq->comp_ring.base, tq->comp_ring.basePA); in vmxnet3_tq_destroy()
549 tq->comp_ring.base = NULL; in vmxnet3_tq_destroy()
551 kfree(tq->buf_info); in vmxnet3_tq_destroy()
552 tq->buf_info = NULL; in vmxnet3_tq_destroy()
562 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_destroy_all()
563 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter); in vmxnet3_tq_destroy_all()
574 memset(tq->tx_ring.base, 0, tq->tx_ring.size * in vmxnet3_tq_init()
576 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_init()
577 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
579 memset(tq->data_ring.base, 0, in vmxnet3_tq_init()
580 tq->data_ring.size * tq->txdata_desc_size); in vmxnet3_tq_init()
582 if (tq->ts_ring.base) in vmxnet3_tq_init()
583 memset(tq->ts_ring.base, 0, in vmxnet3_tq_init()
584 tq->tx_ring.size * tq->tx_ts_desc_size); in vmxnet3_tq_init()
587 memset(tq->comp_ring.base, 0, tq->comp_ring.size * in vmxnet3_tq_init()
589 tq->comp_ring.next2proc = 0; in vmxnet3_tq_init()
590 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
593 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); in vmxnet3_tq_init()
594 for (i = 0; i < tq->tx_ring.size; i++) in vmxnet3_tq_init()
595 tq->buf_info[i].map_type = VMXNET3_MAP_NONE; in vmxnet3_tq_init()
605 BUG_ON(tq->tx_ring.base || tq->data_ring.base || in vmxnet3_tq_create()
606 tq->comp_ring.base || tq->buf_info); in vmxnet3_tq_create()
608 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
609 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), in vmxnet3_tq_create()
610 &tq->tx_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
611 if (!tq->tx_ring.base) { in vmxnet3_tq_create()
612 netdev_err(adapter->netdev, "failed to allocate tx ring\n"); in vmxnet3_tq_create()
616 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
617 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_create()
618 &tq->data_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
619 if (!tq->data_ring.base) { in vmxnet3_tq_create()
620 netdev_err(adapter->netdev, "failed to allocate tx data ring\n"); in vmxnet3_tq_create()
624 if (tq->tx_ts_desc_size != 0) { in vmxnet3_tq_create()
625 tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
626 tq->tx_ring.size * tq->tx_ts_desc_size, in vmxnet3_tq_create()
627 &tq->ts_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
628 if (!tq->ts_ring.base) { in vmxnet3_tq_create()
629 netdev_err(adapter->netdev, "failed to allocate tx ts ring\n"); in vmxnet3_tq_create()
630 tq->tx_ts_desc_size = 0; in vmxnet3_tq_create()
633 tq->ts_ring.base = NULL; in vmxnet3_tq_create()
636 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
637 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), in vmxnet3_tq_create()
638 &tq->comp_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
639 if (!tq->comp_ring.base) { in vmxnet3_tq_create()
640 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); in vmxnet3_tq_create()
644 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]), in vmxnet3_tq_create()
646 dev_to_node(&adapter->pdev->dev)); in vmxnet3_tq_create()
647 if (!tq->buf_info) in vmxnet3_tq_create()
654 return -ENOMEM; in vmxnet3_tq_create()
662 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_cleanup_all()
663 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter); in vmxnet3_tq_cleanup_all()
667 * starting from ring->next2fill, allocate rx buffers for the given ring
668 * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers are allocated or an allocation fails.
677 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; in vmxnet3_rq_alloc_rx_buf()
678 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; in vmxnet3_rq_alloc_rx_buf()
685 rbi = rbi_base + ring->next2fill; in vmxnet3_rq_alloc_rx_buf()
686 gd = ring->base + ring->next2fill; in vmxnet3_rq_alloc_rx_buf()
687 rbi->comp_state = VMXNET3_RXD_COMP_PENDING; in vmxnet3_rq_alloc_rx_buf()
689 if (rbi->buf_type == VMXNET3_RX_BUF_XDP) { in vmxnet3_rq_alloc_rx_buf()
690 void *data = vmxnet3_pp_get_buff(rq->page_pool, in vmxnet3_rq_alloc_rx_buf()
691 &rbi->dma_addr, in vmxnet3_rq_alloc_rx_buf()
694 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
697 rbi->page = virt_to_page(data); in vmxnet3_rq_alloc_rx_buf()
699 } else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { in vmxnet3_rq_alloc_rx_buf()
700 if (rbi->skb == NULL) { in vmxnet3_rq_alloc_rx_buf()
701 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, in vmxnet3_rq_alloc_rx_buf()
702 rbi->len, in vmxnet3_rq_alloc_rx_buf()
704 if (unlikely(rbi->skb == NULL)) { in vmxnet3_rq_alloc_rx_buf()
705 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
709 rbi->dma_addr = dma_map_single( in vmxnet3_rq_alloc_rx_buf()
710 &adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
711 rbi->skb->data, rbi->len, in vmxnet3_rq_alloc_rx_buf()
713 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
714 rbi->dma_addr)) { in vmxnet3_rq_alloc_rx_buf()
715 dev_kfree_skb_any(rbi->skb); in vmxnet3_rq_alloc_rx_buf()
716 rbi->skb = NULL; in vmxnet3_rq_alloc_rx_buf()
717 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
721 /* rx buffer skipped by the device */ in vmxnet3_rq_alloc_rx_buf()
725 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE || in vmxnet3_rq_alloc_rx_buf()
726 rbi->len != PAGE_SIZE); in vmxnet3_rq_alloc_rx_buf()
728 if (rbi->page == NULL) { in vmxnet3_rq_alloc_rx_buf()
729 rbi->page = alloc_page(GFP_ATOMIC); in vmxnet3_rq_alloc_rx_buf()
730 if (unlikely(rbi->page == NULL)) { in vmxnet3_rq_alloc_rx_buf()
731 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
734 rbi->dma_addr = dma_map_page( in vmxnet3_rq_alloc_rx_buf()
735 &adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
736 rbi->page, 0, PAGE_SIZE, in vmxnet3_rq_alloc_rx_buf()
738 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_alloc_rx_buf()
739 rbi->dma_addr)) { in vmxnet3_rq_alloc_rx_buf()
740 put_page(rbi->page); in vmxnet3_rq_alloc_rx_buf()
741 rbi->page = NULL; in vmxnet3_rq_alloc_rx_buf()
742 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
746 /* rx buffers skipped by the device */ in vmxnet3_rq_alloc_rx_buf()
751 gd->rxd.addr = cpu_to_le64(rbi->dma_addr); in vmxnet3_rq_alloc_rx_buf()
752 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) in vmxnet3_rq_alloc_rx_buf()
753 | val | rbi->len); in vmxnet3_rq_alloc_rx_buf()
758 rbi->comp_state = VMXNET3_RXD_COMP_DONE; in vmxnet3_rq_alloc_rx_buf()
762 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT); in vmxnet3_rq_alloc_rx_buf()
767 netdev_dbg(adapter->netdev, in vmxnet3_rq_alloc_rx_buf()
769 num_allocated, ring->next2fill, ring->next2comp); in vmxnet3_rq_alloc_rx_buf()
772 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); in vmxnet3_rq_alloc_rx_buf()
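The refill loop above writes !ring->gen into each descriptor (line 752) and only publishes the real gen bit at the end (line 762); the producer index itself wraps at the ring size and flips the ring generation. A hedged sketch of that next2fill/generation convention (the real helper is vmxnet3_cmd_ring_adv_next2fill(), whose body is not part of this listing).

#include <stdio.h>

struct ring_state {
    unsigned int size;
    unsigned int next2fill;
    unsigned int gen;                      /* VMXNET3_INIT_GEN starts rings at 1 */
};

static void ring_adv_next2fill(struct ring_state *ring)
{
    if (++ring->next2fill == ring->size) {
        ring->next2fill = 0;
        ring->gen ^= 1;                    /* flip generation on wrap-around */
    }
}

int main(void)
{
    struct ring_state r = { .size = 4, .next2fill = 0, .gen = 1 };

    for (int i = 0; i < 6; i++) {
        printf("fill idx=%u gen=%u\n", r.next2fill, r.gen);
        ring_adv_next2fill(&r);
    }
    return 0;
}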
782 skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags; in vmxnet3_append_frag()
784 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); in vmxnet3_append_frag()
786 skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len); in vmxnet3_append_frag()
787 skb->data_len += rcd->len; in vmxnet3_append_frag()
788 skb->truesize += PAGE_SIZE; in vmxnet3_append_frag()
789 skb_shinfo(skb)->nr_frags++; in vmxnet3_append_frag()
804 BUG_ON(ctx->copy_size > skb_headlen(skb)); in vmxnet3_map_pkt()
807 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
809 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
810 gdesc = ctx->sop_txd; /* both loops below can be skipped */ in vmxnet3_map_pkt()
813 if (ctx->copy_size) { in vmxnet3_map_pkt()
814 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + in vmxnet3_map_pkt()
815 tq->tx_ring.next2fill * in vmxnet3_map_pkt()
816 tq->txdata_desc_size); in vmxnet3_map_pkt()
817 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size); in vmxnet3_map_pkt()
818 ctx->sop_txd->dword[3] = 0; in vmxnet3_map_pkt()
820 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
821 tbi->map_type = VMXNET3_MAP_NONE; in vmxnet3_map_pkt()
823 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
825 tq->tx_ring.next2fill, in vmxnet3_map_pkt()
826 le64_to_cpu(ctx->sop_txd->txd.addr), in vmxnet3_map_pkt()
827 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); in vmxnet3_map_pkt()
828 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
830 /* use the right gen for non-SOP desc */ in vmxnet3_map_pkt()
831 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
835 len = skb_headlen(skb) - ctx->copy_size; in vmxnet3_map_pkt()
836 buf_offset = ctx->copy_size; in vmxnet3_map_pkt()
848 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
849 tbi->map_type = VMXNET3_MAP_SINGLE; in vmxnet3_map_pkt()
850 tbi->dma_addr = dma_map_single(&adapter->pdev->dev, in vmxnet3_map_pkt()
851 skb->data + buf_offset, buf_size, in vmxnet3_map_pkt()
853 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) in vmxnet3_map_pkt()
854 return -EFAULT; in vmxnet3_map_pkt()
856 tbi->len = buf_size; in vmxnet3_map_pkt()
858 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
859 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
861 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); in vmxnet3_map_pkt()
862 gdesc->dword[2] = cpu_to_le32(dw2); in vmxnet3_map_pkt()
863 gdesc->dword[3] = 0; in vmxnet3_map_pkt()
865 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
867 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
868 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); in vmxnet3_map_pkt()
869 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
870 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
872 len -= buf_size; in vmxnet3_map_pkt()
876 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in vmxnet3_map_pkt()
877 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in vmxnet3_map_pkt()
883 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
891 tbi->map_type = VMXNET3_MAP_PAGE; in vmxnet3_map_pkt()
892 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, in vmxnet3_map_pkt()
895 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) in vmxnet3_map_pkt()
896 return -EFAULT; in vmxnet3_map_pkt()
898 tbi->len = buf_size; in vmxnet3_map_pkt()
900 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
901 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
903 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); in vmxnet3_map_pkt()
904 gdesc->dword[2] = cpu_to_le32(dw2); in vmxnet3_map_pkt()
905 gdesc->dword[3] = 0; in vmxnet3_map_pkt()
907 netdev_dbg(adapter->netdev, in vmxnet3_map_pkt()
909 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
910 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); in vmxnet3_map_pkt()
911 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
912 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
914 len -= buf_size; in vmxnet3_map_pkt()
919 ctx->eop_txd = gdesc; in vmxnet3_map_pkt()
922 tbi->skb = skb; in vmxnet3_map_pkt()
923 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; in vmxnet3_map_pkt()
924 if (tq->tx_ts_desc_size != 0) { in vmxnet3_map_pkt()
925 ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base + in vmxnet3_map_pkt()
926 tbi->sop_idx * tq->tx_ts_desc_size); in vmxnet3_map_pkt()
927 ctx->ts_txd->ts.tsi = 0; in vmxnet3_map_pkt()
940 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_tq_init_all()
941 vmxnet3_tq_init(&adapter->tx_queue[i], adapter); in vmxnet3_tq_init_all()
952 * -1: error happens during parsing
958 * 2. ctx->copy_size is # of bytes copied
969 if (ctx->mss) { /* TSO */ in vmxnet3_parse_hdr()
970 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) { in vmxnet3_parse_hdr()
971 ctx->l4_offset = skb_inner_transport_offset(skb); in vmxnet3_parse_hdr()
972 ctx->l4_hdr_size = inner_tcp_hdrlen(skb); in vmxnet3_parse_hdr()
973 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size; in vmxnet3_parse_hdr()
975 ctx->l4_offset = skb_transport_offset(skb); in vmxnet3_parse_hdr()
976 ctx->l4_hdr_size = tcp_hdrlen(skb); in vmxnet3_parse_hdr()
977 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size; in vmxnet3_parse_hdr()
980 if (skb->ip_summed == CHECKSUM_PARTIAL) { in vmxnet3_parse_hdr()
983 * well as non-encap case in vmxnet3_parse_hdr()
985 ctx->l4_offset = skb_checksum_start_offset(skb); in vmxnet3_parse_hdr()
988 skb->encapsulation) { in vmxnet3_parse_hdr()
991 if (iph->version == 4) { in vmxnet3_parse_hdr()
992 protocol = iph->protocol; in vmxnet3_parse_hdr()
997 protocol = ipv6h->nexthdr; in vmxnet3_parse_hdr()
1000 if (ctx->ipv4) { in vmxnet3_parse_hdr()
1003 protocol = iph->protocol; in vmxnet3_parse_hdr()
1004 } else if (ctx->ipv6) { in vmxnet3_parse_hdr()
1008 protocol = ipv6h->nexthdr; in vmxnet3_parse_hdr()
1014 ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) : in vmxnet3_parse_hdr()
1018 ctx->l4_hdr_size = sizeof(struct udphdr); in vmxnet3_parse_hdr()
1021 ctx->l4_hdr_size = 0; in vmxnet3_parse_hdr()
1025 ctx->copy_size = min(ctx->l4_offset + in vmxnet3_parse_hdr()
1026 ctx->l4_hdr_size, skb->len); in vmxnet3_parse_hdr()
1028 ctx->l4_offset = 0; in vmxnet3_parse_hdr()
1029 ctx->l4_hdr_size = 0; in vmxnet3_parse_hdr()
1031 ctx->copy_size = min_t(unsigned int, in vmxnet3_parse_hdr()
1032 tq->txdata_desc_size, in vmxnet3_parse_hdr()
1036 if (skb->len <= tq->txdata_desc_size) in vmxnet3_parse_hdr()
1037 ctx->copy_size = skb->len; in vmxnet3_parse_hdr()
1040 if (unlikely(!pskb_may_pull(skb, ctx->copy_size))) in vmxnet3_parse_hdr()
1044 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) { in vmxnet3_parse_hdr()
1045 tq->stats.oversized_hdr++; in vmxnet3_parse_hdr()
1046 ctx->copy_size = 0; in vmxnet3_parse_hdr()
1052 return -1; in vmxnet3_parse_hdr()
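vmxnet3_parse_hdr() above decides how many header bytes get copied into the tx data ring. A hedged, stand-alone restatement of that sizing logic; the parameter names are stand-ins for the skb/ctx fields and the non-offload branch is simplified.

#include <stdio.h>

static unsigned int copy_size(unsigned int mss, int csum_partial,
                              unsigned int l4_offset, unsigned int l4_hdr_size,
                              unsigned int skb_len, unsigned int headlen,
                              unsigned int txdata_desc_size)
{
    unsigned int sz;

    if (mss)                               /* TSO: copy L2/3/4 headers */
        sz = l4_offset + l4_hdr_size;
    else if (csum_partial)                 /* csum offload: same, capped at pkt len */
        sz = l4_offset + l4_hdr_size < skb_len ?
             l4_offset + l4_hdr_size : skb_len;
    else                                   /* plain pkt: copy what fits */
        sz = headlen < txdata_desc_size ? headlen : txdata_desc_size;

    if (skb_len <= txdata_desc_size)       /* tiny pkt: copy it whole */
        sz = skb_len;

    if (sz > txdata_desc_size)             /* oversized headers: copy nothing */
        sz = 0;
    return sz;
}

int main(void)
{
    /* 54 bytes of TCP/IPv4 headers of a TSO packet fit a 128-byte data-ring desc. */
    printf("tso copy_size = %u\n", copy_size(1448, 0, 34, 20, 64000, 200, 128));
    return 0;
}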
1072 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base + in vmxnet3_copy_hdr()
1073 tq->tx_ring.next2fill * in vmxnet3_copy_hdr()
1074 tq->txdata_desc_size); in vmxnet3_copy_hdr()
1076 memcpy(tdd->data, skb->data, ctx->copy_size); in vmxnet3_copy_hdr()
1077 netdev_dbg(adapter->netdev, in vmxnet3_copy_hdr()
1079 ctx->copy_size, tq->tx_ring.next2fill); in vmxnet3_copy_hdr()
1090 if (iph->version == 4) { in vmxnet3_prepare_inner_tso()
1091 iph->check = 0; in vmxnet3_prepare_inner_tso()
1092 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, in vmxnet3_prepare_inner_tso()
1097 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, in vmxnet3_prepare_inner_tso()
1108 if (ctx->ipv4) { in vmxnet3_prepare_tso()
1111 iph->check = 0; in vmxnet3_prepare_tso()
1112 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, in vmxnet3_prepare_tso()
1114 } else if (ctx->ipv6) { in vmxnet3_prepare_tso()
1124 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in txd_estimate()
1125 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in txd_estimate()
1139 * Side-effects:
1142 * 3. shared->txNumDeferred may be updated
1166 ctx.mss = skb_shinfo(skb)->gso_size; in vmxnet3_tq_xmit()
1171 tq->stats.drop_tso++; in vmxnet3_tq_xmit()
1174 tq->stats.copy_skb_header++; in vmxnet3_tq_xmit()
1181 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1184 tq->stats.linearized++; in vmxnet3_tq_xmit()
1189 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1193 if (skb->encapsulation) { in vmxnet3_tq_xmit()
1201 /* non-tso pkts must not use more than VMXNET3_MAX_TXD_PER_PKT entries in vmxnet3_tq_xmit()
1205 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1208 tq->stats.linearized++; in vmxnet3_tq_xmit()
1222 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1226 if (skb->ip_summed == CHECKSUM_PARTIAL) { in vmxnet3_tq_xmit()
1228 skb->csum_offset > in vmxnet3_tq_xmit()
1230 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1236 tq->stats.drop_hdr_inspect_err++; in vmxnet3_tq_xmit()
1240 spin_lock_irqsave(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1242 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { in vmxnet3_tq_xmit()
1243 tq->stats.tx_ring_full++; in vmxnet3_tq_xmit()
1244 netdev_dbg(adapter->netdev, in vmxnet3_tq_xmit()
1246 " next2fill %u\n", adapter->netdev->name, in vmxnet3_tq_xmit()
1247 tq->tx_ring.next2comp, tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1250 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1258 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) in vmxnet3_tq_xmit()
1262 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); in vmxnet3_tq_xmit()
1267 gdesc->dword[2] = ctx.sop_txd->dword[2]; in vmxnet3_tq_xmit()
1268 gdesc->dword[3] = ctx.sop_txd->dword[3]; in vmxnet3_tq_xmit()
1272 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); in vmxnet3_tq_xmit()
1274 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) { in vmxnet3_tq_xmit()
1275 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size; in vmxnet3_tq_xmit()
1277 gdesc->txd.om = VMXNET3_OM_TSO; in vmxnet3_tq_xmit()
1278 gdesc->txd.ext1 = 1; in vmxnet3_tq_xmit()
1280 gdesc->txd.om = VMXNET3_OM_ENCAP; in vmxnet3_tq_xmit()
1282 gdesc->txd.msscof = ctx.mss; in vmxnet3_tq_xmit()
1284 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) in vmxnet3_tq_xmit()
1285 gdesc->txd.oco = 1; in vmxnet3_tq_xmit()
1287 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size; in vmxnet3_tq_xmit()
1288 gdesc->txd.om = VMXNET3_OM_TSO; in vmxnet3_tq_xmit()
1289 gdesc->txd.msscof = ctx.mss; in vmxnet3_tq_xmit()
1291 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss; in vmxnet3_tq_xmit()
1293 if (skb->ip_summed == CHECKSUM_PARTIAL) { in vmxnet3_tq_xmit()
1295 skb->encapsulation) { in vmxnet3_tq_xmit()
1296 gdesc->txd.hlen = ctx.l4_offset + in vmxnet3_tq_xmit()
1299 gdesc->txd.om = VMXNET3_OM_CSUM; in vmxnet3_tq_xmit()
1300 gdesc->txd.msscof = ctx.l4_offset + in vmxnet3_tq_xmit()
1301 skb->csum_offset; in vmxnet3_tq_xmit()
1302 gdesc->txd.ext1 = 1; in vmxnet3_tq_xmit()
1304 gdesc->txd.om = VMXNET3_OM_ENCAP; in vmxnet3_tq_xmit()
1305 gdesc->txd.msscof = 0; /* Reserved */ in vmxnet3_tq_xmit()
1308 gdesc->txd.hlen = ctx.l4_offset; in vmxnet3_tq_xmit()
1309 gdesc->txd.om = VMXNET3_OM_CSUM; in vmxnet3_tq_xmit()
1310 gdesc->txd.msscof = ctx.l4_offset + in vmxnet3_tq_xmit()
1311 skb->csum_offset; in vmxnet3_tq_xmit()
1314 gdesc->txd.om = 0; in vmxnet3_tq_xmit()
1315 gdesc->txd.msscof = 0; in vmxnet3_tq_xmit()
1319 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts); in vmxnet3_tq_xmit()
1323 gdesc->txd.ti = 1; in vmxnet3_tq_xmit()
1324 gdesc->txd.tci = skb_vlan_tag_get(skb); in vmxnet3_tq_xmit()
1327 if (tq->tx_ts_desc_size != 0 && in vmxnet3_tq_xmit()
1328 adapter->latencyConf->sampleRate != 0) { in vmxnet3_tq_xmit()
1329 if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) { in vmxnet3_tq_xmit()
1330 ctx.ts_txd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC); in vmxnet3_tq_xmit()
1331 ctx.ts_txd->ts.tsi = 1; in vmxnet3_tq_xmit()
1335 /* Ensure that the write to (&gdesc->txd)->gen will be observed after in vmxnet3_tq_xmit()
1336 * all other writes to &gdesc->txd. in vmxnet3_tq_xmit()
1341 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ in vmxnet3_tq_xmit()
1351 netdev_dbg(adapter->netdev, in vmxnet3_tq_xmit()
1353 (u32)(ctx.sop_txd - in vmxnet3_tq_xmit()
1354 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), in vmxnet3_tq_xmit()
1355 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3])); in vmxnet3_tq_xmit()
1357 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1359 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) { in vmxnet3_tq_xmit()
1360 tq->shared->txNumDeferred = 0; in vmxnet3_tq_xmit()
1362 adapter->tx_prod_offset + tq->qid * 8, in vmxnet3_tq_xmit()
1363 tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1369 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1371 tq->stats.drop_total++; in vmxnet3_tq_xmit()
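vmxnet3_tq_xmit() does not ring the tx doorbell per packet: it accumulates segment counts in the shared txNumDeferred field (line 1319) and only writes the producer register once the device-supplied txThreshold is crossed (lines 1359-1363). A hedged sketch of that batching; exactly where the counter is compared relative to the add is elided here.

#include <stdio.h>

struct tx_shared {
    unsigned int tx_num_deferred;          /* stands in for tq->shared->txNumDeferred */
    unsigned int tx_threshold;             /* stands in for tq->shared->txThreshold */
};

static void ring_doorbell(unsigned int next2fill)
{
    /* stand-in for the VMXNET3_WRITE_BAR0_REG() producer write */
    printf("  doorbell: producer = %u\n", next2fill);
}

static void xmit_one(struct tx_shared *sh, unsigned int num_pkts,
                     unsigned int next2fill)
{
    sh->tx_num_deferred += num_pkts;       /* le32_add_cpu(&txNumDeferred, num_pkts) */

    if (sh->tx_num_deferred >= sh->tx_threshold) {
        sh->tx_num_deferred = 0;
        ring_doorbell(next2fill);
    } else {
        printf("queued, deferred=%u\n", sh->tx_num_deferred);
    }
}

int main(void)
{
    struct tx_shared sh = { .tx_num_deferred = 0, .tx_threshold = 4 };

    for (unsigned int i = 1; i <= 6; i++)
        xmit_one(&sh, 1, i);               /* six single-segment packets */
    return 0;
}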
1386 .dev = &adapter->pdev->dev, in vmxnet3_create_pp()
1398 err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid, in vmxnet3_create_pp()
1399 rq->napi.napi_id); in vmxnet3_create_pp()
1403 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp); in vmxnet3_create_pp()
1407 rq->page_pool = pp; in vmxnet3_create_pp()
1412 xdp_rxq_info_unreg(&rq->xdp_rxq); in vmxnet3_create_pp()
1429 *dma_addr = page_pool_get_dma_addr(page) + pp->p.offset; in vmxnet3_pp_get_buff()
1439 BUG_ON(skb->queue_mapping > adapter->num_tx_queues); in vmxnet3_xmit_frame()
1441 &adapter->tx_queue[skb->queue_mapping], in vmxnet3_xmit_frame()
1451 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { in vmxnet3_rx_csum()
1452 if (gdesc->rcd.v4 && in vmxnet3_rx_csum()
1453 (le32_to_cpu(gdesc->dword[3]) & in vmxnet3_rx_csum()
1455 skb->ip_summed = CHECKSUM_UNNECESSARY; in vmxnet3_rx_csum()
1456 if ((le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1458 skb->csum_level = 1; in vmxnet3_rx_csum()
1460 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
1461 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1463 WARN_ON_ONCE(gdesc->rcd.frg && in vmxnet3_rx_csum()
1464 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1466 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) & in vmxnet3_rx_csum()
1468 skb->ip_summed = CHECKSUM_UNNECESSARY; in vmxnet3_rx_csum()
1469 if ((le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1471 skb->csum_level = 1; in vmxnet3_rx_csum()
1473 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
1474 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1476 WARN_ON_ONCE(gdesc->rcd.frg && in vmxnet3_rx_csum()
1477 !(le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rx_csum()
1480 if (gdesc->rcd.csum) { in vmxnet3_rx_csum()
1481 skb->csum = htons(gdesc->rcd.csum); in vmxnet3_rx_csum()
1482 skb->ip_summed = CHECKSUM_PARTIAL; in vmxnet3_rx_csum()
1497 rq->stats.drop_err++; in vmxnet3_rx_error()
1498 if (!rcd->fcs) in vmxnet3_rx_error()
1499 rq->stats.drop_fcs++; in vmxnet3_rx_error()
1501 rq->stats.drop_total++; in vmxnet3_rx_error()
1504 * We do not unmap and chain the rx buffer to the skb. in vmxnet3_rx_error()
1510 * ctx->skb may be NULL if this is the first and the only one desc for the pkt in vmxnet3_rx_error()
1513 if (ctx->skb) in vmxnet3_rx_error()
1514 dev_kfree_skb_irq(ctx->skb); in vmxnet3_rx_error()
1516 ctx->skb = NULL; in vmxnet3_rx_error()
1533 BUG_ON(gdesc->rcd.tcp == 0); in vmxnet3_get_hdr_len()
1539 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || in vmxnet3_get_hdr_len()
1540 skb->protocol == cpu_to_be16(ETH_P_8021AD)) in vmxnet3_get_hdr_len()
1546 if (gdesc->rcd.v4) { in vmxnet3_get_hdr_len()
1547 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) && in vmxnet3_get_hdr_len()
1548 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP)); in vmxnet3_get_hdr_len()
1550 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); in vmxnet3_get_hdr_len()
1551 hlen = hdr.ipv4->ihl << 2; in vmxnet3_get_hdr_len()
1552 hdr.ptr += hdr.ipv4->ihl << 2; in vmxnet3_get_hdr_len()
1553 } else if (gdesc->rcd.v6) { in vmxnet3_get_hdr_len()
1554 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) && in vmxnet3_get_hdr_len()
1555 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6)); in vmxnet3_get_hdr_len()
1560 if (hdr.ipv6->nexthdr != IPPROTO_TCP) in vmxnet3_get_hdr_len()
1565 /* Non-IP pkt, dont estimate header length */ in vmxnet3_get_hdr_len()
1572 return (hlen + (hdr.tcp->doff << 2)); in vmxnet3_get_hdr_len()
1581 struct iphdr *iph = (struct iphdr *)skb->data; in vmxnet3_lro_tunnel()
1583 if (iph->protocol == IPPROTO_UDP) in vmxnet3_lro_tunnel()
1586 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; in vmxnet3_lro_tunnel()
1588 if (iph->nexthdr == IPPROTO_UDP) in vmxnet3_lro_tunnel()
1592 if (uh->check) in vmxnet3_lro_tunnel()
1593 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; in vmxnet3_lro_tunnel()
1595 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; in vmxnet3_lro_tunnel()
1604 adapter->rx_prod_offset, adapter->rx_prod2_offset in vmxnet3_rq_rx_complete()
1610 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; in vmxnet3_rq_rx_complete()
1619 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, in vmxnet3_rq_rx_complete()
1621 while (rcd->gen == rq->comp_ring.gen) { in vmxnet3_rq_rx_complete()
1638 * rcd->gen is read. in vmxnet3_rq_rx_complete()
1642 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && in vmxnet3_rq_rx_complete()
1643 rcd->rqID != rq->dataRingQid); in vmxnet3_rq_rx_complete()
1644 idx = rcd->rxdIdx; in vmxnet3_rq_rx_complete()
1645 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID); in vmxnet3_rq_rx_complete()
1646 ring = rq->rx_ring + ring_idx; in vmxnet3_rq_rx_complete()
1647 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, in vmxnet3_rq_rx_complete()
1649 rbi = rq->buf_info[ring_idx] + idx; in vmxnet3_rq_rx_complete()
1651 BUG_ON(rxd->addr != rbi->dma_addr || in vmxnet3_rq_rx_complete()
1652 rxd->len != rbi->len); in vmxnet3_rq_rx_complete()
1654 if (unlikely(rcd->eop && rcd->err)) { in vmxnet3_rq_rx_complete()
1659 if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) { in vmxnet3_rq_rx_complete()
1663 if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) { in vmxnet3_rq_rx_complete()
1664 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1668 if (rbi->buf_type != VMXNET3_RX_BUF_XDP) in vmxnet3_rq_rx_complete()
1674 ctx->skb = skb_xdp_pass; in vmxnet3_rq_rx_complete()
1677 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1684 if (rcd->sop) { /* first buf of the pkt */ in vmxnet3_rq_rx_complete()
1688 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || in vmxnet3_rq_rx_complete()
1689 (rcd->rqID != rq->qid && in vmxnet3_rq_rx_complete()
1690 rcd->rqID != rq->dataRingQid)); in vmxnet3_rq_rx_complete()
1692 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB && in vmxnet3_rq_rx_complete()
1693 rbi->buf_type != VMXNET3_RX_BUF_XDP); in vmxnet3_rq_rx_complete()
1694 BUG_ON(ctx->skb != NULL || rbi->skb == NULL); in vmxnet3_rq_rx_complete()
1696 if (unlikely(rcd->len == 0)) { in vmxnet3_rq_rx_complete()
1697 /* Pretend the rx buffer is skipped. */ in vmxnet3_rq_rx_complete()
1698 BUG_ON(!(rcd->sop && rcd->eop)); in vmxnet3_rq_rx_complete()
1699 netdev_dbg(adapter->netdev, in vmxnet3_rq_rx_complete()
1706 ctx->skb = rbi->skb; in vmxnet3_rq_rx_complete()
1708 if (rq->rx_ts_desc_size != 0 && rcd->ext2) { in vmxnet3_rq_rx_complete()
1711 ts_rxd = (struct Vmxnet3_RxTSDesc *)((u8 *)rq->ts_ring.base + in vmxnet3_rq_rx_complete()
1712 idx * rq->rx_ts_desc_size); in vmxnet3_rq_rx_complete()
1713 ts_rxd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC); in vmxnet3_rq_rx_complete()
1714 ts_rxd->ts.tsi = 1; in vmxnet3_rq_rx_complete()
1718 VMXNET3_RX_DATA_RING(adapter, rcd->rqID); in vmxnet3_rq_rx_complete()
1719 len = rxDataRingUsed ? rcd->len : rbi->len; in vmxnet3_rq_rx_complete()
1726 sz = rcd->rxdIdx * rq->data_ring.desc_size; in vmxnet3_rq_rx_complete()
1728 &rq->data_ring.base[sz], in vmxnet3_rq_rx_complete()
1729 rcd->len, in vmxnet3_rq_rx_complete()
1732 ctx->skb = skb_xdp_pass; in vmxnet3_rq_rx_complete()
1739 new_skb = netdev_alloc_skb_ip_align(adapter->netdev, in vmxnet3_rq_rx_complete()
1745 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1746 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1747 rq->stats.drop_total++; in vmxnet3_rq_rx_complete()
1752 if (rxDataRingUsed && adapter->rxdataring_enabled) { in vmxnet3_rq_rx_complete()
1755 BUG_ON(rcd->len > rq->data_ring.desc_size); in vmxnet3_rq_rx_complete()
1757 ctx->skb = new_skb; in vmxnet3_rq_rx_complete()
1758 sz = rcd->rxdIdx * rq->data_ring.desc_size; in vmxnet3_rq_rx_complete()
1759 memcpy(new_skb->data, in vmxnet3_rq_rx_complete()
1760 &rq->data_ring.base[sz], rcd->len); in vmxnet3_rq_rx_complete()
1762 ctx->skb = rbi->skb; in vmxnet3_rq_rx_complete()
1765 dma_map_single(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1766 new_skb->data, rbi->len, in vmxnet3_rq_rx_complete()
1768 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1775 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1776 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1777 rq->stats.drop_total++; in vmxnet3_rq_rx_complete()
1782 dma_unmap_single(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1783 rbi->dma_addr, in vmxnet3_rq_rx_complete()
1784 rbi->len, in vmxnet3_rq_rx_complete()
1788 rbi->skb = new_skb; in vmxnet3_rq_rx_complete()
1789 rbi->dma_addr = new_dma_addr; in vmxnet3_rq_rx_complete()
1790 rxd->addr = cpu_to_le64(rbi->dma_addr); in vmxnet3_rq_rx_complete()
1791 rxd->len = rbi->len; in vmxnet3_rq_rx_complete()
1794 skb_record_rx_queue(ctx->skb, rq->qid); in vmxnet3_rq_rx_complete()
1795 skb_put(ctx->skb, rcd->len); in vmxnet3_rq_rx_complete()
1798 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) { in vmxnet3_rq_rx_complete()
1805 segCnt = rcdlro->segCnt; in vmxnet3_rq_rx_complete()
1807 mss = rcdlro->mss; in vmxnet3_rq_rx_complete()
1810 encap_lro = (le32_to_cpu(gdesc->dword[0]) & in vmxnet3_rq_rx_complete()
1816 BUG_ON(ctx->skb == NULL && !skip_page_frags); in vmxnet3_rq_rx_complete()
1819 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE); in vmxnet3_rq_rx_complete()
1820 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); in vmxnet3_rq_rx_complete()
1823 * following non-sop fragments. They will be reused. in vmxnet3_rq_rx_complete()
1828 if (rcd->len) { in vmxnet3_rq_rx_complete()
1833 * processing all the following non-sop frags. in vmxnet3_rq_rx_complete()
1836 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1837 dev_kfree_skb(ctx->skb); in vmxnet3_rq_rx_complete()
1838 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1842 new_dma_addr = dma_map_page(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1846 if (dma_mapping_error(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1849 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_rx_complete()
1850 dev_kfree_skb(ctx->skb); in vmxnet3_rq_rx_complete()
1851 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1856 dma_unmap_page(&adapter->pdev->dev, in vmxnet3_rq_rx_complete()
1857 rbi->dma_addr, rbi->len, in vmxnet3_rq_rx_complete()
1860 vmxnet3_append_frag(ctx->skb, rcd, rbi); in vmxnet3_rq_rx_complete()
1863 rbi->page = new_page; in vmxnet3_rq_rx_complete()
1864 rbi->dma_addr = new_dma_addr; in vmxnet3_rq_rx_complete()
1865 rxd->addr = cpu_to_le64(rbi->dma_addr); in vmxnet3_rq_rx_complete()
1866 rxd->len = rbi->len; in vmxnet3_rq_rx_complete()
1872 skb = ctx->skb; in vmxnet3_rq_rx_complete()
1873 if (rcd->eop) { in vmxnet3_rq_rx_complete()
1874 u32 mtu = adapter->netdev->mtu; in vmxnet3_rq_rx_complete()
1875 skb->len += skb->data_len; in vmxnet3_rq_rx_complete()
1878 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && in vmxnet3_rq_rx_complete()
1879 (adapter->netdev->features & NETIF_F_RXHASH)) { in vmxnet3_rq_rx_complete()
1882 switch (rcd->rssType) { in vmxnet3_rq_rx_complete()
1898 le32_to_cpu(rcd->rssHash), in vmxnet3_rq_rx_complete()
1904 skb->protocol = eth_type_trans(skb, adapter->netdev); in vmxnet3_rq_rx_complete()
1905 if ((!rcd->tcp && !encap_lro) || in vmxnet3_rq_rx_complete()
1906 !(adapter->netdev->features & NETIF_F_LRO)) in vmxnet3_rq_rx_complete()
1910 skb_shinfo(skb)->gso_type = rcd->v4 ? in vmxnet3_rq_rx_complete()
1913 vmxnet3_lro_tunnel(skb, skb->protocol); in vmxnet3_rq_rx_complete()
1914 skb_shinfo(skb)->gso_size = mss; in vmxnet3_rq_rx_complete()
1915 skb_shinfo(skb)->gso_segs = segCnt; in vmxnet3_rq_rx_complete()
1916 } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) { in vmxnet3_rq_rx_complete()
1924 skb_shinfo(skb)->gso_type = in vmxnet3_rq_rx_complete()
1925 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; in vmxnet3_rq_rx_complete()
1927 skb_shinfo(skb)->gso_segs = segCnt; in vmxnet3_rq_rx_complete()
1928 skb_shinfo(skb)->gso_size = in vmxnet3_rq_rx_complete()
1929 DIV_ROUND_UP(skb->len - in vmxnet3_rq_rx_complete()
1932 skb_shinfo(skb)->gso_size = mtu - hlen; in vmxnet3_rq_rx_complete()
1936 if (unlikely(rcd->ts)) in vmxnet3_rq_rx_complete()
1937 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci); in vmxnet3_rq_rx_complete()
1940 if ((adapter->netdev->features & NETIF_F_LRO) && in vmxnet3_rq_rx_complete()
1941 !rq->shared->updateRxProd) in vmxnet3_rq_rx_complete()
1944 napi_gro_receive(&rq->napi, skb); in vmxnet3_rq_rx_complete()
1946 ctx->skb = NULL; in vmxnet3_rq_rx_complete()
1952 /* device may have skipped some rx descs */ in vmxnet3_rq_rx_complete()
1953 ring = rq->rx_ring + ring_idx; in vmxnet3_rq_rx_complete()
1954 rbi->comp_state = VMXNET3_RXD_COMP_DONE; in vmxnet3_rq_rx_complete()
1957 fill_offset = (idx > ring->next2fill ? 0 : ring->size) + in vmxnet3_rq_rx_complete()
1958 idx - ring->next2fill - 1; in vmxnet3_rq_rx_complete()
1959 if (!ring->isOutOfOrder || fill_offset >= comp_offset) in vmxnet3_rq_rx_complete()
1960 ring->next2comp = idx; in vmxnet3_rq_rx_complete()
1963 /* Ensure that the writes to rxd->gen bits will be observed after all other writes to the rxd are done. in vmxnet3_rq_rx_complete()
1969 rbi = rq->buf_info[ring_idx] + ring->next2fill; in vmxnet3_rq_rx_complete()
1970 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP))) in vmxnet3_rq_rx_complete()
1973 /* ring0 Type1 buffers can get skipped; re-fill them */ in vmxnet3_rq_rx_complete()
1974 if (rbi->buf_type != VMXNET3_RX_BUF_SKB) in vmxnet3_rq_rx_complete()
1977 if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) { in vmxnet3_rq_rx_complete()
1979 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, in vmxnet3_rq_rx_complete()
1981 WARN_ON(!rxd->addr); in vmxnet3_rq_rx_complete()
1984 rxd->gen = ring->gen; in vmxnet3_rq_rx_complete()
1986 rbi->comp_state = VMXNET3_RXD_COMP_PENDING; in vmxnet3_rq_rx_complete()
1987 num_to_alloc--; in vmxnet3_rq_rx_complete()
1989 /* rx completion hasn't occurred */ in vmxnet3_rq_rx_complete()
1990 ring->isOutOfOrder = 1; in vmxnet3_rq_rx_complete()
1996 ring->isOutOfOrder = 0; in vmxnet3_rq_rx_complete()
2000 if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) { in vmxnet3_rq_rx_complete()
2002 rxprod_reg[ring_idx] + rq->qid * 8, in vmxnet3_rq_rx_complete()
2003 ring->next2fill); in vmxnet3_rq_rx_complete()
2006 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); in vmxnet3_rq_rx_complete()
2008 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); in vmxnet3_rq_rx_complete()
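The refill doorbell on lines 2000-2003 is likewise batched: when the device asks for producer updates (updateRxProd), the BAR0 write happens only when (next2fill & 0xf) == 0, i.e. once per 16 refilled buffers. A small sketch of the effect:

#include <stdio.h>

int main(void)
{
    unsigned int ring_size = 256;
    unsigned int next2fill = 0;
    unsigned int doorbells = 0;

    for (unsigned int refills = 0; refills < ring_size; refills++) {
        next2fill = (next2fill + 1) % ring_size;
        if ((next2fill & 0xf) == 0)        /* same test as line 2000 */
            doorbells++;                   /* stand-in for the BAR0 write */
    }
    printf("%u refills -> %u producer writes\n", ring_size, doorbells);
    return 0;
}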
2025 if (!rq->rx_ring[0].base) in vmxnet3_rq_cleanup()
2029 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { in vmxnet3_rq_cleanup()
2035 rbi = &rq->buf_info[ring_idx][i]; in vmxnet3_rq_cleanup()
2037 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc); in vmxnet3_rq_cleanup()
2039 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && in vmxnet3_rq_cleanup()
2040 rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) { in vmxnet3_rq_cleanup()
2041 page_pool_recycle_direct(rq->page_pool, in vmxnet3_rq_cleanup()
2042 rbi->page); in vmxnet3_rq_cleanup()
2043 rbi->page = NULL; in vmxnet3_rq_cleanup()
2044 } else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && in vmxnet3_rq_cleanup()
2045 rbi->skb) { in vmxnet3_rq_cleanup()
2046 dma_unmap_single(&adapter->pdev->dev, rxd->addr, in vmxnet3_rq_cleanup()
2047 rxd->len, DMA_FROM_DEVICE); in vmxnet3_rq_cleanup()
2048 dev_kfree_skb(rbi->skb); in vmxnet3_rq_cleanup()
2049 rbi->skb = NULL; in vmxnet3_rq_cleanup()
2050 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && in vmxnet3_rq_cleanup()
2051 rbi->page) { in vmxnet3_rq_cleanup()
2052 dma_unmap_page(&adapter->pdev->dev, rxd->addr, in vmxnet3_rq_cleanup()
2053 rxd->len, DMA_FROM_DEVICE); in vmxnet3_rq_cleanup()
2054 put_page(rbi->page); in vmxnet3_rq_cleanup()
2055 rbi->page = NULL; in vmxnet3_rq_cleanup()
2059 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; in vmxnet3_rq_cleanup()
2060 rq->rx_ring[ring_idx].next2fill = in vmxnet3_rq_cleanup()
2061 rq->rx_ring[ring_idx].next2comp = 0; in vmxnet3_rq_cleanup()
2064 rq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_rq_cleanup()
2065 rq->comp_ring.next2proc = 0; in vmxnet3_rq_cleanup()
2067 if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) in vmxnet3_rq_cleanup()
2068 xdp_rxq_info_unreg(&rq->xdp_rxq); in vmxnet3_rq_cleanup()
2069 page_pool_destroy(rq->page_pool); in vmxnet3_rq_cleanup()
2070 rq->page_pool = NULL; in vmxnet3_rq_cleanup()
2079 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_rq_cleanup_all()
2080 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter); in vmxnet3_rq_cleanup_all()
2081 rcu_assign_pointer(adapter->xdp_bpf_prog, NULL); in vmxnet3_rq_cleanup_all()
2091 /* all rx buffers must have already been freed */ in vmxnet3_rq_destroy()
2093 if (rq->buf_info[i]) { in vmxnet3_rq_destroy()
2094 for (j = 0; j < rq->rx_ring[i].size; j++) in vmxnet3_rq_destroy()
2095 BUG_ON(rq->buf_info[i][j].page != NULL); in vmxnet3_rq_destroy()
2101 if (rq->rx_ring[i].base) { in vmxnet3_rq_destroy()
2102 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
2103 rq->rx_ring[i].size in vmxnet3_rq_destroy()
2105 rq->rx_ring[i].base, in vmxnet3_rq_destroy()
2106 rq->rx_ring[i].basePA); in vmxnet3_rq_destroy()
2107 rq->rx_ring[i].base = NULL; in vmxnet3_rq_destroy()
2111 if (rq->data_ring.base) { in vmxnet3_rq_destroy()
2112 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
2113 rq->rx_ring[0].size * rq->data_ring.desc_size, in vmxnet3_rq_destroy()
2114 rq->data_ring.base, rq->data_ring.basePA); in vmxnet3_rq_destroy()
2115 rq->data_ring.base = NULL; in vmxnet3_rq_destroy()
2118 if (rq->ts_ring.base) { in vmxnet3_rq_destroy()
2119 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy()
2120 rq->rx_ring[0].size * rq->rx_ts_desc_size, in vmxnet3_rq_destroy()
2121 rq->ts_ring.base, rq->ts_ring.basePA); in vmxnet3_rq_destroy()
2122 rq->ts_ring.base = NULL; in vmxnet3_rq_destroy()
2125 if (rq->comp_ring.base) { in vmxnet3_rq_destroy()
2126 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size in vmxnet3_rq_destroy()
2128 rq->comp_ring.base, rq->comp_ring.basePA); in vmxnet3_rq_destroy()
2129 rq->comp_ring.base = NULL; in vmxnet3_rq_destroy()
2132 kfree(rq->buf_info[0]); in vmxnet3_rq_destroy()
2133 rq->buf_info[0] = NULL; in vmxnet3_rq_destroy()
2134 rq->buf_info[1] = NULL; in vmxnet3_rq_destroy()
2142 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_destroy_all_rxdataring()
2143 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_rq_destroy_all_rxdataring()
2145 if (rq->data_ring.base) { in vmxnet3_rq_destroy_all_rxdataring()
2146 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_rq_destroy_all_rxdataring()
2147 (rq->rx_ring[0].size * in vmxnet3_rq_destroy_all_rxdataring()
2148 rq->data_ring.desc_size), in vmxnet3_rq_destroy_all_rxdataring()
2149 rq->data_ring.base, in vmxnet3_rq_destroy_all_rxdataring()
2150 rq->data_ring.basePA); in vmxnet3_rq_destroy_all_rxdataring()
2151 rq->data_ring.base = NULL; in vmxnet3_rq_destroy_all_rxdataring()
2153 rq->data_ring.desc_size = 0; in vmxnet3_rq_destroy_all_rxdataring()
2164 for (i = 0; i < rq->rx_ring[0].size; i++) { in vmxnet3_rq_init()
2167 if (i % adapter->rx_buf_per_pkt == 0) { in vmxnet3_rq_init()
2168 rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ? in vmxnet3_rq_init()
2171 rq->buf_info[0][i].len = adapter->skb_buf_size; in vmxnet3_rq_init()
2173 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE; in vmxnet3_rq_init()
2174 rq->buf_info[0][i].len = PAGE_SIZE; in vmxnet3_rq_init()
2177 for (i = 0; i < rq->rx_ring[1].size; i++) { in vmxnet3_rq_init()
2178 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE; in vmxnet3_rq_init()
2179 rq->buf_info[1][i].len = PAGE_SIZE; in vmxnet3_rq_init()
2184 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; in vmxnet3_rq_init()
2186 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * in vmxnet3_rq_init()
2188 rq->rx_ring[i].gen = VMXNET3_INIT_GEN; in vmxnet3_rq_init()
2189 rq->rx_ring[i].isOutOfOrder = 0; in vmxnet3_rq_init()
2193 rq->rx_ring[0].size + rq->rx_ring[1].size); in vmxnet3_rq_init()
2197 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, in vmxnet3_rq_init()
2199 xdp_rxq_info_unreg(&rq->xdp_rxq); in vmxnet3_rq_init()
2200 page_pool_destroy(rq->page_pool); in vmxnet3_rq_init()
2201 rq->page_pool = NULL; in vmxnet3_rq_init()
2203 /* at least has 1 rx buffer for the 1st ring */ in vmxnet3_rq_init()
2204 return -ENOMEM; in vmxnet3_rq_init()
2206 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); in vmxnet3_rq_init()
2208 if (rq->ts_ring.base) in vmxnet3_rq_init()
2209 memset(rq->ts_ring.base, 0, in vmxnet3_rq_init()
2210 rq->rx_ring[0].size * rq->rx_ts_desc_size); in vmxnet3_rq_init()
2213 rq->comp_ring.next2proc = 0; in vmxnet3_rq_init()
2214 memset(rq->comp_ring.base, 0, rq->comp_ring.size * in vmxnet3_rq_init()
2216 rq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_rq_init()
2219 rq->rx_ctx.skb = NULL; in vmxnet3_rq_init()
2231 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_init_all()
2232 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter); in vmxnet3_rq_init_all()
2234 dev_err(&adapter->netdev->dev, "%s: failed to " in vmxnet3_rq_init_all()
2235 "initialize rx queue%i\n", in vmxnet3_rq_init_all()
2236 adapter->netdev->name, i); in vmxnet3_rq_init_all()
2254 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); in vmxnet3_rq_create()
2255 rq->rx_ring[i].base = dma_alloc_coherent( in vmxnet3_rq_create()
2256 &adapter->pdev->dev, sz, in vmxnet3_rq_create()
2257 &rq->rx_ring[i].basePA, in vmxnet3_rq_create()
2259 if (!rq->rx_ring[i].base) { in vmxnet3_rq_create()
2260 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2261 "failed to allocate rx ring %d\n", i); in vmxnet3_rq_create()
2266 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) { in vmxnet3_rq_create()
2267 sz = rq->rx_ring[0].size * rq->data_ring.desc_size; in vmxnet3_rq_create()
2268 rq->data_ring.base = in vmxnet3_rq_create()
2269 dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2270 &rq->data_ring.basePA, in vmxnet3_rq_create()
2272 if (!rq->data_ring.base) { in vmxnet3_rq_create()
2273 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2274 "rx data ring will be disabled\n"); in vmxnet3_rq_create()
2275 adapter->rxdataring_enabled = false; in vmxnet3_rq_create()
2278 rq->data_ring.base = NULL; in vmxnet3_rq_create()
2279 rq->data_ring.desc_size = 0; in vmxnet3_rq_create()
2282 if (rq->rx_ts_desc_size != 0) { in vmxnet3_rq_create()
2283 sz = rq->rx_ring[0].size * rq->rx_ts_desc_size; in vmxnet3_rq_create()
2284 rq->ts_ring.base = in vmxnet3_rq_create()
2285 dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2286 &rq->ts_ring.basePA, in vmxnet3_rq_create()
2288 if (!rq->ts_ring.base) { in vmxnet3_rq_create()
2289 netdev_err(adapter->netdev, in vmxnet3_rq_create()
2290 "rx ts ring will be disabled\n"); in vmxnet3_rq_create()
2291 rq->rx_ts_desc_size = 0; in vmxnet3_rq_create()
2294 rq->ts_ring.base = NULL; in vmxnet3_rq_create()
2297 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); in vmxnet3_rq_create()
2298 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_rq_create()
2299 &rq->comp_ring.basePA, in vmxnet3_rq_create()
2301 if (!rq->comp_ring.base) { in vmxnet3_rq_create()
2302 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); in vmxnet3_rq_create()
2306 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size, in vmxnet3_rq_create()
2307 sizeof(rq->buf_info[0][0]), GFP_KERNEL, in vmxnet3_rq_create()
2308 dev_to_node(&adapter->pdev->dev)); in vmxnet3_rq_create()
2312 rq->buf_info[0] = bi; in vmxnet3_rq_create()
2313 rq->buf_info[1] = bi + rq->rx_ring[0].size; in vmxnet3_rq_create()
2319 return -ENOMEM; in vmxnet3_rq_create()
2328 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); in vmxnet3_rq_create_all()
2330 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_rq_create_all()
2331 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter); in vmxnet3_rq_create_all()
2333 dev_err(&adapter->netdev->dev, in vmxnet3_rq_create_all()
2334 "%s: failed to create rx queue%i\n", in vmxnet3_rq_create_all()
2335 adapter->netdev->name, i); in vmxnet3_rq_create_all()
2340 if (!adapter->rxdataring_enabled) in vmxnet3_rq_create_all()
2350 /* Multiple queue aware polling function for tx and rx */
2356 if (unlikely(adapter->shared->ecr)) in vmxnet3_do_poll()
2358 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_do_poll()
2359 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter); in vmxnet3_do_poll()
2361 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_do_poll()
2362 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i], in vmxnet3_do_poll()
2375 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget); in vmxnet3_poll()
2379 vmxnet3_enable_all_intrs(rx_queue->adapter); in vmxnet3_poll()
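vmxnet3_poll() above follows the usual NAPI contract: process at most `budget` rx descriptors, and only when less than the budget was consumed complete the napi context and re-enable interrupts (the napi_complete_done() call sits in lines elided from this listing). A hedged sketch of that loop:

#include <stdio.h>

static int pending_rx = 37;                /* pretend backlog of rx work */

static int do_poll(int budget)             /* stand-in for vmxnet3_do_poll() */
{
    int done = pending_rx < budget ? pending_rx : budget;

    pending_rx -= done;
    return done;
}

static int poll(int budget)                /* stand-in for vmxnet3_poll() */
{
    int rxd_done = do_poll(budget);

    if (rxd_done < budget) {
        /* napi_complete_done(napi, rxd_done); */
        printf("re-enable interrupts\n");  /* vmxnet3_enable_all_intrs() */
    }
    return rxd_done;
}

int main(void)
{
    int budget = 16, done;

    do {
        done = poll(budget);
        printf("polled %d descriptors\n", done);
    } while (done == budget);              /* softirq keeps polling at full budget */
    return 0;
}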
2385 * NAPI polling function for MSI-X mode with multiple Rx queues
2386 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
2394 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_poll_rx_only()
2400 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_poll_rx_only()
2402 &adapter->tx_queue[rq - adapter->rx_queue]; in vmxnet3_poll_rx_only()
2410 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_poll_rx_only()
2427 struct vmxnet3_adapter *adapter = tq->adapter; in vmxnet3_msix_tx()
2429 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_tx()
2430 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
2433 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { in vmxnet3_msix_tx()
2435 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_msix_tx()
2436 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i]; in vmxnet3_msix_tx()
2442 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
2449 * Handle completion interrupts on rx queues. Returns whether or not the interrupt is handled.
2457 struct vmxnet3_adapter *adapter = rq->adapter; in vmxnet3_msix_rx()
2460 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_rx()
2461 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx); in vmxnet3_msix_rx()
2462 napi_schedule(&rq->napi); in vmxnet3_msix_rx()
2468 *----------------------------------------------------------------------------
2470 * vmxnet3_msix_event --
2477 *----------------------------------------------------------------------------
2487 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_msix_event()
2488 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx); in vmxnet3_msix_event()
2490 if (adapter->shared->ecr) in vmxnet3_msix_event()
2493 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx); in vmxnet3_msix_event()
2508 if (adapter->intr.type == VMXNET3_IT_INTX) { in vmxnet3_intr()
2517 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) in vmxnet3_intr()
2520 napi_schedule(&adapter->rx_queue[0].napi); in vmxnet3_intr()
2533 switch (adapter->intr.type) { in vmxnet3_netpoll()
2537 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_netpoll()
2538 vmxnet3_msix_rx(0, &adapter->rx_queue[i]); in vmxnet3_netpoll()
2544 vmxnet3_intr(0, adapter->netdev); in vmxnet3_netpoll()
2554 struct vmxnet3_intr *intr = &adapter->intr; in vmxnet3_request_irqs()
2559 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_request_irqs()
2560 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_request_irqs()
2561 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_request_irqs()
2562 sprintf(adapter->tx_queue[i].name, "%s-tx-%d", in vmxnet3_request_irqs()
2563 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2565 intr->msix_entries[vector].vector, in vmxnet3_request_irqs()
2567 adapter->tx_queue[i].name, in vmxnet3_request_irqs()
2568 &adapter->tx_queue[i]); in vmxnet3_request_irqs()
2570 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d", in vmxnet3_request_irqs()
2571 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2574 dev_err(&adapter->netdev->dev, in vmxnet3_request_irqs()
2577 adapter->tx_queue[i].name, err); in vmxnet3_request_irqs()
2583 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) { in vmxnet3_request_irqs()
2584 for (; i < adapter->num_tx_queues; i++) in vmxnet3_request_irqs()
2585 adapter->tx_queue[i].comp_ring.intr_idx in vmxnet3_request_irqs()
2590 adapter->tx_queue[i].comp_ring.intr_idx in vmxnet3_request_irqs()
2594 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) in vmxnet3_request_irqs()
2597 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_request_irqs()
2598 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) in vmxnet3_request_irqs()
2599 sprintf(adapter->rx_queue[i].name, "%s-rx-%d", in vmxnet3_request_irqs()
2600 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2602 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d", in vmxnet3_request_irqs()
2603 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2604 err = request_irq(intr->msix_entries[vector].vector, in vmxnet3_request_irqs()
2606 adapter->rx_queue[i].name, in vmxnet3_request_irqs()
2607 &(adapter->rx_queue[i])); in vmxnet3_request_irqs()
2609 netdev_err(adapter->netdev, in vmxnet3_request_irqs()
2612 adapter->rx_queue[i].name, err); in vmxnet3_request_irqs()
2616 adapter->rx_queue[i].comp_ring.intr_idx = vector++; in vmxnet3_request_irqs()
2619 sprintf(intr->event_msi_vector_name, "%s-event-%d", in vmxnet3_request_irqs()
2620 adapter->netdev->name, vector); in vmxnet3_request_irqs()
2621 err = request_irq(intr->msix_entries[vector].vector, in vmxnet3_request_irqs()
2623 intr->event_msi_vector_name, adapter->netdev); in vmxnet3_request_irqs()
2624 intr->event_intr_idx = vector; in vmxnet3_request_irqs()
2626 } else if (intr->type == VMXNET3_IT_MSI) { in vmxnet3_request_irqs()
2627 adapter->num_rx_queues = 1; in vmxnet3_request_irqs()
2628 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, in vmxnet3_request_irqs()
2629 adapter->netdev->name, adapter->netdev); in vmxnet3_request_irqs()
2632 adapter->num_rx_queues = 1; in vmxnet3_request_irqs()
2633 err = request_irq(adapter->pdev->irq, vmxnet3_intr, in vmxnet3_request_irqs()
2634 IRQF_SHARED, adapter->netdev->name, in vmxnet3_request_irqs()
2635 adapter->netdev); in vmxnet3_request_irqs()
2639 intr->num_intrs = vector + 1; in vmxnet3_request_irqs()
2641 netdev_err(adapter->netdev, in vmxnet3_request_irqs()
2643 intr->type, err); in vmxnet3_request_irqs()
2645 /* Number of rx queues will not change after this */ in vmxnet3_request_irqs()
2646 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_request_irqs()
2647 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_request_irqs()
2648 rq->qid = i; in vmxnet3_request_irqs()
2649 rq->qid2 = i + adapter->num_rx_queues; in vmxnet3_request_irqs()
2650 rq->dataRingQid = i + 2 * adapter->num_rx_queues; in vmxnet3_request_irqs()
2654 for (i = 0; i < intr->num_intrs; i++) in vmxnet3_request_irqs()
2655 intr->mod_levels[i] = UPT1_IML_ADAPTIVE; in vmxnet3_request_irqs()
2656 if (adapter->intr.type != VMXNET3_IT_MSIX) { in vmxnet3_request_irqs()
2657 adapter->intr.event_intr_idx = 0; in vmxnet3_request_irqs()
2658 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_request_irqs()
2659 adapter->tx_queue[i].comp_ring.intr_idx = 0; in vmxnet3_request_irqs()
2660 adapter->rx_queue[0].comp_ring.intr_idx = 0; in vmxnet3_request_irqs()
2663 netdev_info(adapter->netdev, in vmxnet3_request_irqs()
2665 intr->type, intr->mask_mode, intr->num_intrs); in vmxnet3_request_irqs()
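/*
 * Vector assignment summary for the MSI-X case handled above:
 *   VMXNET3_INTR_DONTSHARE  - one vector per tx queue, one per rx queue,
 *                             plus a dedicated event vector.
 *   VMXNET3_INTR_TXSHARE    - all tx queues share the first vector while
 *                             each rx queue keeps its own.
 *   VMXNET3_INTR_BUDDYSHARE - tx queue i rides on rx queue i's vector
 *                             (hence the "%s-rxtx-%d" names), so no
 *                             separate tx vectors are requested.
 * With MSI or INTx a single interrupt serves everything and every
 * comp_ring.intr_idx plus event_intr_idx is forced to 0.
 */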
2675 struct vmxnet3_intr *intr = &adapter->intr; in vmxnet3_free_irqs()
2676 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0); in vmxnet3_free_irqs()
2678 switch (intr->type) { in vmxnet3_free_irqs()
2684 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) { in vmxnet3_free_irqs()
2685 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_free_irqs()
2686 free_irq(intr->msix_entries[vector++].vector, in vmxnet3_free_irqs()
2687 &(adapter->tx_queue[i])); in vmxnet3_free_irqs()
2688 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) in vmxnet3_free_irqs()
2693 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_free_irqs()
2694 free_irq(intr->msix_entries[vector++].vector, in vmxnet3_free_irqs()
2695 &(adapter->rx_queue[i])); in vmxnet3_free_irqs()
2698 free_irq(intr->msix_entries[vector].vector, in vmxnet3_free_irqs()
2699 adapter->netdev); in vmxnet3_free_irqs()
2700 BUG_ON(vector >= intr->num_intrs); in vmxnet3_free_irqs()
2705 free_irq(adapter->pdev->irq, adapter->netdev); in vmxnet3_free_irqs()
2708 free_irq(adapter->pdev->irq, adapter->netdev); in vmxnet3_free_irqs()
2719 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_restore_vlan()
2725 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in vmxnet3_restore_vlan()
2735 if (!(netdev->flags & IFF_PROMISC)) { in vmxnet3_vlan_rx_add_vid()
2736 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_vlan_rx_add_vid()
2740 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_add_vid()
2743 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_add_vid()
2746 set_bit(vid, adapter->active_vlans); in vmxnet3_vlan_rx_add_vid()
2757 if (!(netdev->flags & IFF_PROMISC)) { in vmxnet3_vlan_rx_kill_vid()
2758 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_vlan_rx_kill_vid()
2762 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_kill_vid()
2765 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_vlan_rx_kill_vid()
2768 clear_bit(vid, adapter->active_vlans); in vmxnet3_vlan_rx_kill_vid()
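/*
 * The VLAN filter is a bitmap (vfTable) in the shared devRead area: the
 * add/kill callbacks above set or clear the bit for the VLAN id and issue
 * the update command under cmd_lock, but skip the device update entirely
 * in promiscuous mode. The id is always tracked in active_vlans so that
 * vmxnet3_restore_vlan() can rebuild the table after a reset.
 */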
2789 memcpy(buf + i++ * ETH_ALEN, ha->addr, in vmxnet3_copy_mc()
2803 &adapter->shared->devRead.rxFilterConf; in vmxnet3_set_mc()
2809 if (netdev->flags & IFF_PROMISC) { in vmxnet3_set_mc()
2810 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; in vmxnet3_set_mc()
2818 if (netdev->flags & IFF_BROADCAST) in vmxnet3_set_mc()
2821 if (netdev->flags & IFF_ALLMULTI) in vmxnet3_set_mc()
2829 rxConf->mfTableLen = cpu_to_le16(sz); in vmxnet3_set_mc()
2831 &adapter->pdev->dev, in vmxnet3_set_mc()
2835 if (!dma_mapping_error(&adapter->pdev->dev, in vmxnet3_set_mc()
2839 rxConf->mfTablePA = cpu_to_le64( in vmxnet3_set_mc()
2851 rxConf->mfTableLen = 0; in vmxnet3_set_mc()
2852 rxConf->mfTablePA = 0; in vmxnet3_set_mc()
2855 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_set_mc()
2856 if (new_mode != rxConf->rxMode) { in vmxnet3_set_mc()
2857 rxConf->rxMode = cpu_to_le32(new_mode); in vmxnet3_set_mc()
2866 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_set_mc()
2869 dma_unmap_single(&adapter->pdev->dev, new_table_pa, in vmxnet3_set_mc()
2870 rxConf->mfTableLen, DMA_TO_DEVICE); in vmxnet3_set_mc()
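/*
 * vmxnet3_set_mc() builds the new rx mode from the netdev flags
 * (promiscuous, broadcast, all-multi) and copies the multicast list into
 * a DMA-mapped table referenced by rxConf->mfTablePA; the rx-mode command
 * is only re-issued when the computed mode differs from the current
 * rxConf->rxMode. If the table cannot be allocated or mapped, the driver
 * is expected to fall back to accepting all multicast rather than
 * dropping traffic.
 */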
2879 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_rq_destroy_all()
2880 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter); in vmxnet3_rq_destroy_all()
2891 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_setup_driver_shared() local
2892 struct Vmxnet3_DSDevRead *devRead = &shared->devRead; in vmxnet3_setup_driver_shared()
2893 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt; in vmxnet3_setup_driver_shared()
2900 memset(shared, 0, sizeof(*shared)); in vmxnet3_setup_driver_shared()
2903 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC); in vmxnet3_setup_driver_shared()
2904 devRead->misc.driverInfo.version = cpu_to_le32( in vmxnet3_setup_driver_shared()
2906 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? in vmxnet3_setup_driver_shared()
2908 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; in vmxnet3_setup_driver_shared()
2909 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32( in vmxnet3_setup_driver_shared()
2910 *((u32 *)&devRead->misc.driverInfo.gos)); in vmxnet3_setup_driver_shared()
2911 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); in vmxnet3_setup_driver_shared()
2912 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); in vmxnet3_setup_driver_shared()
2914 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa); in vmxnet3_setup_driver_shared()
2915 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); in vmxnet3_setup_driver_shared()
2918 if (adapter->netdev->features & NETIF_F_RXCSUM) in vmxnet3_setup_driver_shared()
2919 devRead->misc.uptFeatures |= UPT1_F_RXCSUM; in vmxnet3_setup_driver_shared()
2921 if (adapter->netdev->features & NETIF_F_LRO) { in vmxnet3_setup_driver_shared()
2922 devRead->misc.uptFeatures |= UPT1_F_LRO; in vmxnet3_setup_driver_shared()
2923 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); in vmxnet3_setup_driver_shared()
2925 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in vmxnet3_setup_driver_shared()
2926 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; in vmxnet3_setup_driver_shared()
2928 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL | in vmxnet3_setup_driver_shared()
2930 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD; in vmxnet3_setup_driver_shared()
2932 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); in vmxnet3_setup_driver_shared()
2933 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); in vmxnet3_setup_driver_shared()
2934 devRead->misc.queueDescLen = cpu_to_le32( in vmxnet3_setup_driver_shared()
2935 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + in vmxnet3_setup_driver_shared()
2936 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc)); in vmxnet3_setup_driver_shared()
2939 devRead->misc.numTxQueues = adapter->num_tx_queues; in vmxnet3_setup_driver_shared()
2940 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_setup_driver_shared()
2941 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_setup_driver_shared()
2942 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL); in vmxnet3_setup_driver_shared()
2943 tqc = &adapter->tqd_start[i].conf; in vmxnet3_setup_driver_shared()
2944 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); in vmxnet3_setup_driver_shared()
2945 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); in vmxnet3_setup_driver_shared()
2946 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); in vmxnet3_setup_driver_shared()
2947 tqc->ddPA = cpu_to_le64(~0ULL); in vmxnet3_setup_driver_shared()
2948 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); in vmxnet3_setup_driver_shared()
2949 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); in vmxnet3_setup_driver_shared()
2950 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size); in vmxnet3_setup_driver_shared()
2951 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); in vmxnet3_setup_driver_shared()
2952 tqc->ddLen = cpu_to_le32(0); in vmxnet3_setup_driver_shared()
2953 tqc->intrIdx = tq->comp_ring.intr_idx; in vmxnet3_setup_driver_shared()
2955 tqtsc = &adapter->tqd_start[i].tsConf; in vmxnet3_setup_driver_shared()
2956 tqtsc->txTSRingBasePA = cpu_to_le64(tq->ts_ring.basePA); in vmxnet3_setup_driver_shared()
2957 tqtsc->txTSRingDescSize = cpu_to_le16(tq->tx_ts_desc_size); in vmxnet3_setup_driver_shared()
2961 /* rx queue settings */ in vmxnet3_setup_driver_shared()
2962 devRead->misc.numRxQueues = adapter->num_rx_queues; in vmxnet3_setup_driver_shared()
2963 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_setup_driver_shared()
2964 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_setup_driver_shared()
2965 rqc = &adapter->rqd_start[i].conf; in vmxnet3_setup_driver_shared()
2966 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); in vmxnet3_setup_driver_shared()
2967 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); in vmxnet3_setup_driver_shared()
2968 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); in vmxnet3_setup_driver_shared()
2969 rqc->ddPA = cpu_to_le64(~0ULL); in vmxnet3_setup_driver_shared()
2970 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); in vmxnet3_setup_driver_shared()
2971 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); in vmxnet3_setup_driver_shared()
2972 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); in vmxnet3_setup_driver_shared()
2973 rqc->ddLen = cpu_to_le32(0); in vmxnet3_setup_driver_shared()
2974 rqc->intrIdx = rq->comp_ring.intr_idx; in vmxnet3_setup_driver_shared()
2976 rqc->rxDataRingBasePA = in vmxnet3_setup_driver_shared()
2977 cpu_to_le64(rq->data_ring.basePA); in vmxnet3_setup_driver_shared()
2978 rqc->rxDataRingDescSize = in vmxnet3_setup_driver_shared()
2979 cpu_to_le16(rq->data_ring.desc_size); in vmxnet3_setup_driver_shared()
2982 rqtsc = &adapter->rqd_start[i].tsConf; in vmxnet3_setup_driver_shared()
2983 rqtsc->rxTSRingBasePA = cpu_to_le64(rq->ts_ring.basePA); in vmxnet3_setup_driver_shared()
2984 rqtsc->rxTSRingDescSize = cpu_to_le16(rq->rx_ts_desc_size); in vmxnet3_setup_driver_shared()
2989 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf)); in vmxnet3_setup_driver_shared()
2991 if (adapter->rss) { in vmxnet3_setup_driver_shared()
2992 struct UPT1_RSSConf *rssConf = adapter->rss_conf; in vmxnet3_setup_driver_shared()
2994 devRead->misc.uptFeatures |= UPT1_F_RSS; in vmxnet3_setup_driver_shared()
2995 devRead->misc.numRxQueues = adapter->num_rx_queues; in vmxnet3_setup_driver_shared()
2996 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | in vmxnet3_setup_driver_shared()
3000 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; in vmxnet3_setup_driver_shared()
3001 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; in vmxnet3_setup_driver_shared()
3002 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; in vmxnet3_setup_driver_shared()
3003 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey)); in vmxnet3_setup_driver_shared()
3005 for (i = 0; i < rssConf->indTableSize; i++) in vmxnet3_setup_driver_shared()
3006 rssConf->indTable[i] = ethtool_rxfh_indir_default( in vmxnet3_setup_driver_shared()
3007 i, adapter->num_rx_queues); in vmxnet3_setup_driver_shared()
3009 devRead->rssConfDesc.confVer = 1; in vmxnet3_setup_driver_shared()
3010 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf)); in vmxnet3_setup_driver_shared()
3011 devRead->rssConfDesc.confPA = in vmxnet3_setup_driver_shared()
3012 cpu_to_le64(adapter->rss_conf_pa); in vmxnet3_setup_driver_shared()
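/*
 * Default RSS indirection, worked out: ethtool_rxfh_indir_default(i, n)
 * is simply i % n, so with 4 rx queues the table above reads
 * 0,1,2,3,0,1,2,3,... across all VMXNET3_RSS_IND_TABLE_SIZE entries. The
 * Toeplitz hash of each flow indexes this table to choose the completion
 * queue that receives it.
 */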
3019 !adapter->queuesExtEnabled) { in vmxnet3_setup_driver_shared()
3020 devRead->intrConf.autoMask = adapter->intr.mask_mode == in vmxnet3_setup_driver_shared()
3022 devRead->intrConf.numIntrs = adapter->intr.num_intrs; in vmxnet3_setup_driver_shared()
3023 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_setup_driver_shared()
3024 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; in vmxnet3_setup_driver_shared()
3026 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; in vmxnet3_setup_driver_shared()
3027 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); in vmxnet3_setup_driver_shared()
3029 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode == in vmxnet3_setup_driver_shared()
3031 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs; in vmxnet3_setup_driver_shared()
3032 for (i = 0; i < adapter->intr.num_intrs; i++) in vmxnet3_setup_driver_shared()
3033 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i]; in vmxnet3_setup_driver_shared()
3035 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx; in vmxnet3_setup_driver_shared()
3036 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); in vmxnet3_setup_driver_shared()
3039 /* rx filter settings */ in vmxnet3_setup_driver_shared()
3040 devRead->rxFilterConf.rxMode = 0; in vmxnet3_setup_driver_shared()
3042 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); in vmxnet3_setup_driver_shared()
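/*
 * Everything vmxnet3_setup_driver_shared() fills in above is consumed by
 * the device in one go: driver/GOS identification, feature bits mirroring
 * the current netdev features (RXCSUM, LRO, VLAN strip, inner offloads),
 * DMA addresses and sizes of every tx/rx/completion ring, the optional
 * RSS configuration descriptor, per-vector moderation levels, and the MAC
 * address. The device reads it when the activate command is issued later
 * from vmxnet3_activate_dev().
 */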
3050 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_bufsize() local
3051 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; in vmxnet3_init_bufsize()
3057 cmdInfo->ringBufSize = adapter->ringBufSize; in vmxnet3_init_bufsize()
3058 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_bufsize()
3061 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_bufsize()
3067 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_coalesce() local
3068 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; in vmxnet3_init_coalesce()
3074 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_coalesce()
3075 cmdInfo->varConf.confVer = 1; in vmxnet3_init_coalesce()
3076 cmdInfo->varConf.confLen = in vmxnet3_init_coalesce()
3077 cpu_to_le32(sizeof(*adapter->coal_conf)); in vmxnet3_init_coalesce()
3078 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa); in vmxnet3_init_coalesce()
3080 if (adapter->default_coal_mode) { in vmxnet3_init_coalesce()
3088 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_coalesce()
3094 struct Vmxnet3_DriverShared *shared = adapter->shared; in vmxnet3_init_rssfields() local
3095 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; in vmxnet3_init_rssfields()
3101 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_init_rssfields()
3103 if (adapter->default_rss_fields) { in vmxnet3_init_rssfields()
3106 adapter->rss_fields = in vmxnet3_init_rssfields()
3110 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 || in vmxnet3_init_rssfields()
3111 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) && in vmxnet3_init_rssfields()
3112 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
3114 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS; in vmxnet3_init_rssfields()
3116 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS); in vmxnet3_init_rssfields()
3119 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) && in vmxnet3_init_rssfields()
3120 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
3122 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4; in vmxnet3_init_rssfields()
3124 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4); in vmxnet3_init_rssfields()
3127 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) && in vmxnet3_init_rssfields()
3128 vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_init_rssfields()
3130 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6; in vmxnet3_init_rssfields()
3132 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6); in vmxnet3_init_rssfields()
3135 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_init_rssfields()
3137 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_init_rssfields()
3139 cmdInfo->setRssFields = adapter->rss_fields; in vmxnet3_init_rssfields()
3147 adapter->rss_fields = in vmxnet3_init_rssfields()
3151 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_init_rssfields()
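/*
 * RSS hash-field negotiation above: UDP or ESP fields are only requested
 * when the matching passthrough capability is present (checked via
 * vmxnet3_check_ptcapability() against ptcap_supported); the
 * corresponding dev_caps[0] bits are set or cleared, written to the DCR
 * register, confirmed back through the command register, and only then is
 * the chosen rss_fields value handed to the device.
 */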
3161 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," in vmxnet3_activate_dev()
3162 " ring sizes %u %u %u\n", adapter->netdev->name, in vmxnet3_activate_dev()
3163 adapter->skb_buf_size, adapter->rx_buf_per_pkt, in vmxnet3_activate_dev()
3164 adapter->tx_queue[0].tx_ring.size, in vmxnet3_activate_dev()
3165 adapter->rx_queue[0].rx_ring[0].size, in vmxnet3_activate_dev()
3166 adapter->rx_queue[0].rx_ring[1].size); in vmxnet3_activate_dev()
3171 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3172 "Failed to init rx queue error %d\n", err); in vmxnet3_activate_dev()
3178 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3186 adapter->shared_pa)); in vmxnet3_activate_dev()
3188 adapter->shared_pa)); in vmxnet3_activate_dev()
3189 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_activate_dev()
3193 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_activate_dev()
3196 netdev_err(adapter->netdev, in vmxnet3_activate_dev()
3198 err = -EINVAL; in vmxnet3_activate_dev()
3206 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_activate_dev()
3208 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN, in vmxnet3_activate_dev()
3209 adapter->rx_queue[i].rx_ring[0].next2fill); in vmxnet3_activate_dev()
3210 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset + in vmxnet3_activate_dev()
3212 adapter->rx_queue[i].rx_ring[1].next2fill); in vmxnet3_activate_dev()
3215 /* Apply the rx filter settings last. */ in vmxnet3_activate_dev()
3216 vmxnet3_set_mc(adapter->netdev); in vmxnet3_activate_dev()
3223 netif_tx_wake_all_queues(adapter->netdev); in vmxnet3_activate_dev()
3224 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_activate_dev()
3225 napi_enable(&adapter->rx_queue[i].napi); in vmxnet3_activate_dev()
3227 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_activate_dev()
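/*
 * Activation order above: init the rx rings, request interrupts, give the
 * device the physical address of the shared area, issue the activate
 * command and check its status, prime each queue's two rx producer
 * registers with the initial fill levels, apply the rx filter settings,
 * wake the tx queues, enable NAPI and interrupts, and only then clear the
 * QUIESCED state bit.
 */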
3246 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_reset_dev()
3248 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_reset_dev()
3257 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) in vmxnet3_quiesce_dev()
3261 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_quiesce_dev()
3264 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_quiesce_dev()
3267 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_quiesce_dev()
3268 napi_disable(&adapter->rx_queue[i].napi); in vmxnet3_quiesce_dev()
3269 netif_tx_disable(adapter->netdev); in vmxnet3_quiesce_dev()
3270 adapter->link_speed = 0; in vmxnet3_quiesce_dev()
3271 netif_carrier_off(adapter->netdev); in vmxnet3_quiesce_dev()
3299 dev_addr_set(netdev, addr->sa_data); in vmxnet3_set_mac_addr()
3300 vmxnet3_write_mac_addr(adapter, addr->sa_data); in vmxnet3_set_mac_addr()
3313 struct pci_dev *pdev = adapter->pdev; in vmxnet3_alloc_pci_resources()
3317 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err); in vmxnet3_alloc_pci_resources()
3321 err = pci_request_selected_regions(pdev, (1 << 2) - 1, in vmxnet3_alloc_pci_resources()
3324 dev_err(&pdev->dev, in vmxnet3_alloc_pci_resources()
3333 adapter->hw_addr0 = ioremap(mmio_start, mmio_len); in vmxnet3_alloc_pci_resources()
3334 if (!adapter->hw_addr0) { in vmxnet3_alloc_pci_resources()
3335 dev_err(&pdev->dev, "Failed to map bar0\n"); in vmxnet3_alloc_pci_resources()
3336 err = -EIO; in vmxnet3_alloc_pci_resources()
3342 adapter->hw_addr1 = ioremap(mmio_start, mmio_len); in vmxnet3_alloc_pci_resources()
3343 if (!adapter->hw_addr1) { in vmxnet3_alloc_pci_resources()
3344 dev_err(&pdev->dev, "Failed to map bar1\n"); in vmxnet3_alloc_pci_resources()
3345 err = -EIO; in vmxnet3_alloc_pci_resources()
3351 iounmap(adapter->hw_addr0); in vmxnet3_alloc_pci_resources()
3353 pci_release_selected_regions(pdev, (1 << 2) - 1); in vmxnet3_alloc_pci_resources()
3363 BUG_ON(!adapter->pdev); in vmxnet3_free_pci_resources()
3365 iounmap(adapter->hw_addr0); in vmxnet3_free_pci_resources()
3366 iounmap(adapter->hw_addr1); in vmxnet3_free_pci_resources()
3367 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); in vmxnet3_free_pci_resources()
3368 pci_disable_device(adapter->pdev); in vmxnet3_free_pci_resources()
3378 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - in vmxnet3_adjust_rx_ring_size()
3380 adapter->skb_buf_size = adapter->netdev->mtu + in vmxnet3_adjust_rx_ring_size()
3382 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) in vmxnet3_adjust_rx_ring_size()
3383 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; in vmxnet3_adjust_rx_ring_size()
3385 adapter->rx_buf_per_pkt = 1; in vmxnet3_adjust_rx_ring_size()
3387 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; in vmxnet3_adjust_rx_ring_size()
3388 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + in vmxnet3_adjust_rx_ring_size()
3390 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; in vmxnet3_adjust_rx_ring_size()
3393 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE, in vmxnet3_adjust_rx_ring_size()
3395 adapter->rx_buf_per_pkt = 1; in vmxnet3_adjust_rx_ring_size()
3396 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size); in vmxnet3_adjust_rx_ring_size()
3397 adapter->ringBufSize.ring1BufSizeType1 = 0; in vmxnet3_adjust_rx_ring_size()
3398 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE); in vmxnet3_adjust_rx_ring_size()
3405 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; in vmxnet3_adjust_rx_ring_size()
3406 ring0_size = adapter->rx_queue[0].rx_ring[0].size; in vmxnet3_adjust_rx_ring_size()
3407 ring0_size = (ring0_size + sz - 1) / sz * sz; in vmxnet3_adjust_rx_ring_size()
3410 ring1_size = adapter->rx_queue[0].rx_ring[1].size; in vmxnet3_adjust_rx_ring_size()
3411 ring1_size = (ring1_size + sz - 1) / sz * sz; in vmxnet3_adjust_rx_ring_size()
3421 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_adjust_rx_ring_size()
3422 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_adjust_rx_ring_size()
3424 rq->rx_ring[0].size = ring0_size; in vmxnet3_adjust_rx_ring_size()
3425 rq->rx_ring[1].size = ring1_size; in vmxnet3_adjust_rx_ring_size()
3426 rq->comp_ring.size = comp_size; in vmxnet3_adjust_rx_ring_size()
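/*
 * Illustrative ring-size arithmetic (assuming VMXNET3_RING_SIZE_ALIGN is
 * 32): if the MTU requires rx_buf_per_pkt = 3, then sz = 3 * 32 = 96 and
 * a requested ring0 size of 1024 rounds up to (1024 + 95) / 96 * 96 =
 * 1056, so the ring length is always a whole number of per-packet buffer
 * groups. Ring 1 is rounded the same way and the completion ring is sized
 * to cover both rings together.
 */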
3438 for (i = 0; i < adapter->num_tx_queues; i++) { in vmxnet3_create_queues()
3439 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_create_queues()
3440 tq->tx_ring.size = tx_ring_size; in vmxnet3_create_queues()
3441 tq->data_ring.size = tx_ring_size; in vmxnet3_create_queues()
3442 tq->comp_ring.size = tx_ring_size; in vmxnet3_create_queues()
3443 tq->txdata_desc_size = txdata_desc_size; in vmxnet3_create_queues()
3444 tq->shared = &adapter->tqd_start[i].ctrl; in vmxnet3_create_queues()
3445 tq->stopped = true; in vmxnet3_create_queues()
3446 tq->adapter = adapter; in vmxnet3_create_queues()
3447 tq->qid = i; in vmxnet3_create_queues()
3448 tq->tx_ts_desc_size = adapter->tx_ts_desc_size; in vmxnet3_create_queues()
3449 tq->tsPktCount = 1; in vmxnet3_create_queues()
3459 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; in vmxnet3_create_queues()
3460 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; in vmxnet3_create_queues()
3463 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); in vmxnet3_create_queues()
3464 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_create_queues()
3465 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_create_queues()
3466 /* qid and qid2 for rx queues will be assigned later when the number in vmxnet3_create_queues()
3467 * of rx queues is finalized after allocating interrupts */ in vmxnet3_create_queues()
3468 rq->shared = &adapter->rqd_start[i].ctrl; in vmxnet3_create_queues()
3469 rq->adapter = adapter; in vmxnet3_create_queues()
3470 rq->data_ring.desc_size = rxdata_desc_size; in vmxnet3_create_queues()
3471 rq->rx_ts_desc_size = adapter->rx_ts_desc_size; in vmxnet3_create_queues()
3475 netdev_err(adapter->netdev, in vmxnet3_create_queues()
3476 "Could not allocate any rx queues. " in vmxnet3_create_queues()
3480 netdev_info(adapter->netdev, in vmxnet3_create_queues()
3481 "Number of rx queues changed " in vmxnet3_create_queues()
3483 adapter->num_rx_queues = i; in vmxnet3_create_queues()
3490 if (!adapter->rxdataring_enabled) in vmxnet3_create_queues()
3507 for (i = 0; i < adapter->num_tx_queues; i++) in vmxnet3_open()
3508 spin_lock_init(&adapter->tx_queue[i].tx_lock); in vmxnet3_open()
3515 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_open()
3519 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_open()
3525 adapter->txdata_desc_size = in vmxnet3_open()
3528 adapter->txdata_desc_size = txdata_desc_size; in vmxnet3_open()
3531 adapter->rxdata_desc_size = (ret >> 16) & 0xffff; in vmxnet3_open()
3533 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc); in vmxnet3_open()
3542 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_open()
3546 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_open()
3557 adapter->tx_ts_desc_size = tx_ts_desc_size; in vmxnet3_open()
3558 adapter->rx_ts_desc_size = rx_ts_desc_size; in vmxnet3_open()
3560 adapter->tx_ts_desc_size = 0; in vmxnet3_open()
3561 adapter->rx_ts_desc_size = 0; in vmxnet3_open()
3565 adapter->tx_ring_size, in vmxnet3_open()
3566 adapter->rx_ring_size, in vmxnet3_open()
3567 adapter->rx_ring2_size, in vmxnet3_open()
3568 adapter->txdata_desc_size, in vmxnet3_open()
3569 adapter->rxdata_desc_size); in vmxnet3_open()
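/*
 * Open path above: on newer device versions the tx/rx data descriptor
 * sizes (and, where the version supports them, the timestamp descriptor
 * sizes) are queried from the device first; older versions fall back to
 * the fixed Vmxnet3_TxDataDesc size and no timestamp rings. Only then are
 * the queues created with the currently configured ring sizes.
 */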
3596 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_close()
3604 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_close()
3620 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); in vmxnet3_force_close()
3623 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_force_close()
3624 napi_enable(&adapter->rx_queue[i].napi); in vmxnet3_force_close()
3629 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_force_close()
3630 dev_close(adapter->netdev); in vmxnet3_force_close()
3644 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_change_mtu()
3651 /* we need to re-create the rx queues based on the new mtu */ in vmxnet3_change_mtu()
3653 WRITE_ONCE(netdev->mtu, new_mtu); in vmxnet3_change_mtu()
3658 "failed to re-create rx queues, " in vmxnet3_change_mtu()
3666 "failed to re-activate, error %d. " in vmxnet3_change_mtu()
3671 WRITE_ONCE(netdev->mtu, new_mtu); in vmxnet3_change_mtu()
3675 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_change_mtu()
3686 struct net_device *netdev = adapter->netdev; in vmxnet3_declare_features()
3690 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3693 adapter->disabledOffloads = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_declare_features()
3694 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3697 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | in vmxnet3_declare_features()
3703 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | in vmxnet3_declare_features()
3706 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM | in vmxnet3_declare_features()
3713 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_TSO) { in vmxnet3_declare_features()
3714 netdev->hw_features &= ~(NETIF_F_TSO | NETIF_F_TSO6); in vmxnet3_declare_features()
3715 netdev->hw_enc_features &= ~(NETIF_F_TSO | NETIF_F_TSO6); in vmxnet3_declare_features()
3718 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_LRO) { in vmxnet3_declare_features()
3719 netdev->hw_features &= ~(NETIF_F_LRO); in vmxnet3_declare_features()
3720 netdev->hw_enc_features &= ~(NETIF_F_LRO); in vmxnet3_declare_features()
3726 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3728 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3730 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3732 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3734 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3736 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO; in vmxnet3_declare_features()
3738 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3740 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO; in vmxnet3_declare_features()
3742 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3744 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3746 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0], in vmxnet3_declare_features()
3748 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD; in vmxnet3_declare_features()
3751 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_declare_features()
3752 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3754 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_declare_features()
3755 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_declare_features()
3757 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3758 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3759 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) && in vmxnet3_declare_features()
3760 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) { in vmxnet3_declare_features()
3761 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; in vmxnet3_declare_features()
3762 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; in vmxnet3_declare_features()
3764 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) && in vmxnet3_declare_features()
3765 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) { in vmxnet3_declare_features()
3766 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM; in vmxnet3_declare_features()
3767 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM; in vmxnet3_declare_features()
3771 netdev->vlan_features = netdev->hw_features & in vmxnet3_declare_features()
3774 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; in vmxnet3_declare_features()
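/*
 * Feature negotiation above: the baseline hw_features are advertised
 * unconditionally, anything the device reports in disabledOffloads (TSO,
 * LRO) is stripped again, and the Geneve/VXLAN tunnel segmentation and
 * checksum offloads survive only if the matching capabilities are
 * acknowledged in dev_caps[0]. vlan_features and the initial features
 * mask are derived from whatever remains.
 */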
3805 int ret = pci_enable_msix_range(adapter->pdev, in vmxnet3_acquire_msix_vectors()
3806 adapter->intr.msix_entries, nvec, nvec); in vmxnet3_acquire_msix_vectors()
3808 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) { in vmxnet3_acquire_msix_vectors()
3809 dev_err(&adapter->netdev->dev, in vmxnet3_acquire_msix_vectors()
3810 "Failed to enable %d MSI-X, trying %d\n", in vmxnet3_acquire_msix_vectors()
3813 ret = pci_enable_msix_range(adapter->pdev, in vmxnet3_acquire_msix_vectors()
3814 adapter->intr.msix_entries, in vmxnet3_acquire_msix_vectors()
3820 dev_err(&adapter->netdev->dev, in vmxnet3_acquire_msix_vectors()
3821 "Failed to enable MSI-X, error: %d\n", ret); in vmxnet3_acquire_msix_vectors()
3837 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_alloc_intr_resources()
3841 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_alloc_intr_resources()
3842 adapter->intr.type = cfg & 0x3; in vmxnet3_alloc_intr_resources()
3843 adapter->intr.mask_mode = (cfg >> 2) & 0x3; in vmxnet3_alloc_intr_resources()
3845 if (adapter->intr.type == VMXNET3_IT_AUTO) { in vmxnet3_alloc_intr_resources()
3846 adapter->intr.type = VMXNET3_IT_MSIX; in vmxnet3_alloc_intr_resources()
3850 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_alloc_intr_resources()
3853 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? in vmxnet3_alloc_intr_resources()
3854 1 : adapter->num_tx_queues; in vmxnet3_alloc_intr_resources()
3855 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ? in vmxnet3_alloc_intr_resources()
3856 0 : adapter->num_rx_queues; in vmxnet3_alloc_intr_resources()
3862 adapter->intr.msix_entries[i].entry = i; in vmxnet3_alloc_intr_resources()
3869 * then limit the number of rx queues to 1 in vmxnet3_alloc_intr_resources()
3873 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE in vmxnet3_alloc_intr_resources()
3874 || adapter->num_rx_queues != 1) { in vmxnet3_alloc_intr_resources()
3875 adapter->share_intr = VMXNET3_INTR_TXSHARE; in vmxnet3_alloc_intr_resources()
3876 netdev_err(adapter->netdev, in vmxnet3_alloc_intr_resources()
3877 "Number of rx queues : 1\n"); in vmxnet3_alloc_intr_resources()
3878 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3882 adapter->intr.num_intrs = nvec_allocated; in vmxnet3_alloc_intr_resources()
3886 /* If we cannot allocate MSI-X vectors, use only one rx queue */ in vmxnet3_alloc_intr_resources()
3887 dev_info(&adapter->pdev->dev, in vmxnet3_alloc_intr_resources()
3888 "Failed to enable MSI-X, error %d. " in vmxnet3_alloc_intr_resources()
3889 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated); in vmxnet3_alloc_intr_resources()
3891 adapter->intr.type = VMXNET3_IT_MSI; in vmxnet3_alloc_intr_resources()
3894 if (adapter->intr.type == VMXNET3_IT_MSI) { in vmxnet3_alloc_intr_resources()
3895 if (!pci_enable_msi(adapter->pdev)) { in vmxnet3_alloc_intr_resources()
3896 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3897 adapter->intr.num_intrs = 1; in vmxnet3_alloc_intr_resources()
3903 adapter->num_rx_queues = 1; in vmxnet3_alloc_intr_resources()
3904 dev_info(&adapter->netdev->dev, in vmxnet3_alloc_intr_resources()
3905 "Using INTx interrupt, #Rx queues: 1.\n"); in vmxnet3_alloc_intr_resources()
3906 adapter->intr.type = VMXNET3_IT_INTX; in vmxnet3_alloc_intr_resources()
3908 /* INT-X related setting */ in vmxnet3_alloc_intr_resources()
3909 adapter->intr.num_intrs = 1; in vmxnet3_alloc_intr_resources()
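/*
 * Interrupt setup policy above: read the preferred type and mask mode
 * from the device, try MSI-X with one vector per queue (fewer when tx or
 * buddy sharing is in effect) plus one for events, retry with the minimum
 * vector count on -ENOSPC, and if MSI-X cannot be enabled fall back to
 * MSI and finally to legacy INTx, limiting the driver to a single rx
 * queue in both fallback modes.
 */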
3916 if (adapter->intr.type == VMXNET3_IT_MSIX) in vmxnet3_free_intr_resources()
3917 pci_disable_msix(adapter->pdev); in vmxnet3_free_intr_resources()
3918 else if (adapter->intr.type == VMXNET3_IT_MSI) in vmxnet3_free_intr_resources()
3919 pci_disable_msi(adapter->pdev); in vmxnet3_free_intr_resources()
3921 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); in vmxnet3_free_intr_resources()
3929 adapter->tx_timeout_count++; in vmxnet3_tx_timeout()
3931 netdev_err(adapter->netdev, "tx hang\n"); in vmxnet3_tx_timeout()
3932 schedule_work(&adapter->work); in vmxnet3_tx_timeout()
3944 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_reset_work()
3949 if (netif_running(adapter->netdev)) { in vmxnet3_reset_work()
3950 netdev_notice(adapter->netdev, "resetting\n"); in vmxnet3_reset_work()
3955 netdev_info(adapter->netdev, "already closed\n"); in vmxnet3_reset_work()
3959 netif_wake_queue(adapter->netdev); in vmxnet3_reset_work()
3960 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_reset_work()
4019 return -ENOMEM; in vmxnet3_probe_device()
4023 adapter->netdev = netdev; in vmxnet3_probe_device()
4024 adapter->pdev = pdev; in vmxnet3_probe_device()
4026 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; in vmxnet3_probe_device()
4027 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; in vmxnet3_probe_device()
4028 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; in vmxnet3_probe_device()
4030 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in vmxnet3_probe_device()
4032 dev_err(&pdev->dev, "dma_set_mask failed\n"); in vmxnet3_probe_device()
4036 spin_lock_init(&adapter->cmd_lock); in vmxnet3_probe_device()
4037 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, in vmxnet3_probe_device()
4040 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { in vmxnet3_probe_device()
4041 dev_err(&pdev->dev, "Failed to map dma\n"); in vmxnet3_probe_device()
4042 err = -EFAULT; in vmxnet3_probe_device()
4045 adapter->shared = dma_alloc_coherent( in vmxnet3_probe_device()
4046 &adapter->pdev->dev, in vmxnet3_probe_device()
4048 &adapter->shared_pa, GFP_KERNEL); in vmxnet3_probe_device()
4049 if (!adapter->shared) { in vmxnet3_probe_device()
4050 dev_err(&pdev->dev, "Failed to allocate memory\n"); in vmxnet3_probe_device()
4051 err = -ENOMEM; in vmxnet3_probe_device()
4060 for (i = VMXNET3_REV_9; i >= VMXNET3_REV_1; i--) { in vmxnet3_probe_device()
4063 adapter->version = i + 1; in vmxnet3_probe_device()
4068 dev_err(&pdev->dev, in vmxnet3_probe_device()
4070 err = -EBUSY; in vmxnet3_probe_device()
4073 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version); in vmxnet3_probe_device()
4079 dev_err(&pdev->dev, in vmxnet3_probe_device()
4081 err = -EBUSY; in vmxnet3_probe_device()
4086 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR); in vmxnet3_probe_device()
4087 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR); in vmxnet3_probe_device()
4088 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) { in vmxnet3_probe_device()
4089 adapter->dev_caps[0] = adapter->devcap_supported[0] & in vmxnet3_probe_device()
4092 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) && in vmxnet3_probe_device()
4093 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) && in vmxnet3_probe_device()
4094 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) { in vmxnet3_probe_device()
4095 adapter->dev_caps[0] |= adapter->devcap_supported[0] & in vmxnet3_probe_device()
4098 if (adapter->dev_caps[0]) in vmxnet3_probe_device()
4099 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]); in vmxnet3_probe_device()
4101 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4103 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); in vmxnet3_probe_device()
4104 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4108 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) { in vmxnet3_probe_device()
4109 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD; in vmxnet3_probe_device()
4110 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD; in vmxnet3_probe_device()
4111 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2; in vmxnet3_probe_device()
4113 adapter->tx_prod_offset = VMXNET3_REG_TXPROD; in vmxnet3_probe_device()
4114 adapter->rx_prod_offset = VMXNET3_REG_RXPROD; in vmxnet3_probe_device()
4115 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2; in vmxnet3_probe_device()
4119 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4123 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_probe_device()
4125 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff)); in vmxnet3_probe_device()
4126 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff)); in vmxnet3_probe_device()
4128 adapter->num_rx_queues = min(num_rx_queues, in vmxnet3_probe_device()
4130 adapter->num_tx_queues = min(num_tx_queues, in vmxnet3_probe_device()
4133 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES || in vmxnet3_probe_device()
4134 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) { in vmxnet3_probe_device()
4135 adapter->queuesExtEnabled = true; in vmxnet3_probe_device()
4137 adapter->queuesExtEnabled = false; in vmxnet3_probe_device()
4140 adapter->queuesExtEnabled = false; in vmxnet3_probe_device()
4143 adapter->num_rx_queues = min(num_rx_queues, in vmxnet3_probe_device()
4145 adapter->num_tx_queues = min(num_tx_queues, in vmxnet3_probe_device()
4148 dev_info(&pdev->dev, in vmxnet3_probe_device()
4149 "# of Tx queues : %d, # of Rx queues : %d\n", in vmxnet3_probe_device()
4150 adapter->num_tx_queues, adapter->num_rx_queues); in vmxnet3_probe_device()
4152 adapter->rx_buf_per_pkt = 1; in vmxnet3_probe_device()
4154 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; in vmxnet3_probe_device()
4155 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; in vmxnet3_probe_device()
4156 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size, in vmxnet3_probe_device()
4157 &adapter->queue_desc_pa, in vmxnet3_probe_device()
4160 if (!adapter->tqd_start) { in vmxnet3_probe_device()
4161 dev_err(&pdev->dev, "Failed to allocate memory\n"); in vmxnet3_probe_device()
4162 err = -ENOMEM; in vmxnet3_probe_device()
4165 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + in vmxnet3_probe_device()
4166 adapter->num_tx_queues); in vmxnet3_probe_device()
4168 adapter->latencyConf = &adapter->tqd_start->tsConf.latencyConf; in vmxnet3_probe_device()
4170 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4172 &adapter->pm_conf_pa, in vmxnet3_probe_device()
4174 if (adapter->pm_conf == NULL) { in vmxnet3_probe_device()
4175 err = -ENOMEM; in vmxnet3_probe_device()
4181 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4183 &adapter->rss_conf_pa, in vmxnet3_probe_device()
4185 if (adapter->rss_conf == NULL) { in vmxnet3_probe_device()
4186 err = -ENOMEM; in vmxnet3_probe_device()
4192 adapter->coal_conf = in vmxnet3_probe_device()
4193 dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4196 &adapter->coal_conf_pa, in vmxnet3_probe_device()
4198 if (!adapter->coal_conf) { in vmxnet3_probe_device()
4199 err = -ENOMEM; in vmxnet3_probe_device()
4202 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED; in vmxnet3_probe_device()
4203 adapter->default_coal_mode = true; in vmxnet3_probe_device()
4207 adapter->default_rss_fields = true; in vmxnet3_probe_device()
4208 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT; in vmxnet3_probe_device()
4211 SET_NETDEV_DEV(netdev, &pdev->dev); in vmxnet3_probe_device()
4213 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in vmxnet3_probe_device()
4216 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ? in vmxnet3_probe_device()
4219 if (adapter->num_tx_queues == adapter->num_rx_queues) in vmxnet3_probe_device()
4220 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE; in vmxnet3_probe_device()
4222 adapter->share_intr = VMXNET3_INTR_DONTSHARE; in vmxnet3_probe_device()
4227 if (adapter->num_rx_queues > 1 && in vmxnet3_probe_device()
4228 adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_probe_device()
4229 adapter->rss = true; in vmxnet3_probe_device()
4230 netdev->hw_features |= NETIF_F_RXHASH; in vmxnet3_probe_device()
4231 netdev->features |= NETIF_F_RXHASH; in vmxnet3_probe_device()
4232 dev_dbg(&pdev->dev, "RSS is enabled.\n"); in vmxnet3_probe_device()
4234 adapter->rss = false; in vmxnet3_probe_device()
4241 netdev->netdev_ops = &vmxnet3_netdev_ops; in vmxnet3_probe_device()
4243 netdev->watchdog_timeo = 5 * HZ; in vmxnet3_probe_device()
4245 /* MTU range: 60 - 9190 */ in vmxnet3_probe_device()
4246 netdev->min_mtu = VMXNET3_MIN_MTU; in vmxnet3_probe_device()
4248 netdev->max_mtu = VMXNET3_V6_MAX_MTU; in vmxnet3_probe_device()
4250 netdev->max_mtu = VMXNET3_MAX_MTU; in vmxnet3_probe_device()
4252 INIT_WORK(&adapter->work, vmxnet3_reset_work); in vmxnet3_probe_device()
4253 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); in vmxnet3_probe_device()
4255 if (adapter->intr.type == VMXNET3_IT_MSIX) { in vmxnet3_probe_device()
4257 for (i = 0; i < adapter->num_rx_queues; i++) { in vmxnet3_probe_device()
4258 netif_napi_add(adapter->netdev, in vmxnet3_probe_device()
4259 &adapter->rx_queue[i].napi, in vmxnet3_probe_device()
4263 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi, in vmxnet3_probe_device()
4267 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); in vmxnet3_probe_device()
4268 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); in vmxnet3_probe_device()
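/*
 * NAPI wiring at probe time: with MSI-X every rx queue gets its own NAPI
 * instance (each backed by its own vector), otherwise a single NAPI
 * instance on rx queue 0 services the device, matching the one interrupt
 * available with MSI or INTx. The real tx/rx queue counts are published
 * to the stack before the netdev is registered.
 */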
4274 dev_err(&pdev->dev, "Failed to register adapter\n"); in vmxnet3_probe_device()
4283 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4285 adapter->coal_conf, adapter->coal_conf_pa); in vmxnet3_probe_device()
4290 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), in vmxnet3_probe_device()
4291 adapter->rss_conf, adapter->rss_conf_pa); in vmxnet3_probe_device()
4294 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), in vmxnet3_probe_device()
4295 adapter->pm_conf, adapter->pm_conf_pa); in vmxnet3_probe_device()
4297 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, in vmxnet3_probe_device()
4298 adapter->queue_desc_pa); in vmxnet3_probe_device()
4302 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_probe_device()
4304 adapter->shared, adapter->shared_pa); in vmxnet3_probe_device()
4306 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, in vmxnet3_probe_device()
4334 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_remove_device()
4338 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_remove_device()
4349 cancel_work_sync(&adapter->work); in vmxnet3_remove_device()
4356 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_remove_device()
4358 adapter->coal_conf, adapter->coal_conf_pa); in vmxnet3_remove_device()
4361 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), in vmxnet3_remove_device()
4362 adapter->rss_conf, adapter->rss_conf_pa); in vmxnet3_remove_device()
4364 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), in vmxnet3_remove_device()
4365 adapter->pm_conf, adapter->pm_conf_pa); in vmxnet3_remove_device()
4367 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; in vmxnet3_remove_device()
4369 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, in vmxnet3_remove_device()
4370 adapter->queue_desc_pa); in vmxnet3_remove_device()
4371 dma_free_coherent(&adapter->pdev->dev, in vmxnet3_remove_device()
4373 adapter->shared, adapter->shared_pa); in vmxnet3_remove_device()
4374 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, in vmxnet3_remove_device()
4388 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) in vmxnet3_shutdown_device()
4392 &adapter->state)) { in vmxnet3_shutdown_device()
4393 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_shutdown_device()
4396 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_shutdown_device()
4399 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_shutdown_device()
4402 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); in vmxnet3_shutdown_device()
4426 for (i = 0; i < adapter->num_rx_queues; i++) in vmxnet3_suspend()
4427 napi_disable(&adapter->rx_queue[i].napi); in vmxnet3_suspend()
4435 /* Create wake-up filters. */ in vmxnet3_suspend()
4436 pmConf = adapter->pm_conf; in vmxnet3_suspend()
4439 if (adapter->wol & WAKE_UCAST) { in vmxnet3_suspend()
4440 pmConf->filters[i].patternSize = ETH_ALEN; in vmxnet3_suspend()
4441 pmConf->filters[i].maskSize = 1; in vmxnet3_suspend()
4442 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); in vmxnet3_suspend()
4443 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ in vmxnet3_suspend()
4445 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; in vmxnet3_suspend()
4449 if (adapter->wol & WAKE_ARP) { in vmxnet3_suspend()
4458 ifa = rcu_dereference(in_dev->ifa_list); in vmxnet3_suspend()
4464 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ in vmxnet3_suspend()
4468 pmConf->filters[i].maskSize = in vmxnet3_suspend()
4469 (pmConf->filters[i].patternSize - 1) / 8 + 1; in vmxnet3_suspend()
4472 ehdr = (struct ethhdr *)pmConf->filters[i].pattern; in vmxnet3_suspend()
4473 ehdr->h_proto = htons(ETH_P_ARP); in vmxnet3_suspend()
4476 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN]; in vmxnet3_suspend()
4477 ahdr->ar_op = htons(ARPOP_REQUEST); in vmxnet3_suspend()
4482 *(__be32 *)arpreq = ifa->ifa_address; in vmxnet3_suspend()
4487 pmConf->filters[i].mask[0] = 0x00; in vmxnet3_suspend()
4488 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */ in vmxnet3_suspend()
4489 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */ in vmxnet3_suspend()
4490 pmConf->filters[i].mask[3] = 0x00; in vmxnet3_suspend()
4491 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ in vmxnet3_suspend()
4492 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ in vmxnet3_suspend()
4494 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; in vmxnet3_suspend()
4499 if (adapter->wol & WAKE_MAGIC) in vmxnet3_suspend()
4500 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; in vmxnet3_suspend()
4502 pmConf->numFilters = i; in vmxnet3_suspend()
4504 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); in vmxnet3_suspend()
4505 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( in vmxnet3_suspend()
4507 adapter->shared->devRead.pmConfDesc.confPA = in vmxnet3_suspend()
4508 cpu_to_le64(adapter->pm_conf_pa); in vmxnet3_suspend()
4510 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_suspend()
4513 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_suspend()
4517 adapter->wol); in vmxnet3_suspend()
4554 spin_lock_irqsave(&adapter->cmd_lock, flags); in vmxnet3_resume()
4557 spin_unlock_irqrestore(&adapter->cmd_lock, flags); in vmxnet3_resume()
4565 "failed to re-activate on resume, error: %d", err); in vmxnet3_resume()
4597 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC, in vmxnet3_init_module()