Lines Matching +full:tx +full:- +full:csum +full:- +full:limit

1 // SPDX-License-Identifier: GPL-2.0-or-later
33 static bool csum = true, gso = true, napi_tx = true; variable
34 module_param(csum, bool, 0444);
41 #define VIRTIO_O2F_DELTA (VIRTIO_FEATURES_MAP_MIN - \
67 * at once, the weight is chosen so that the EWMA will be insensitive to short-
134 #define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
135 #define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
180 {#name, offsetof(struct virtio_net_stats_cvq, name), -1}
183 {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
186 {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
211 VIRTNET_STATS_DESC_RX(csum, needs_csum),
252 VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
253 VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none, csum_none),
254 VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad, csum_bad),
258 VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none, csum_none),
259 VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
306 /* TX: fragments + linear part + virtio header */
542 u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1; in virtnet_rss_hdr_size()
544 return struct_size(vi->rss_hdr, indirection_table, indir_table_size); in virtnet_rss_hdr_size()
549 return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size); in virtnet_rss_trailer_size()
574 return virtqueue_add_outbuf(sq->vq, sq->sg, num, in virtnet_add_outbuf()
598 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in __free_old_xmit()
604 stats->napi_packets++; in __free_old_xmit()
605 stats->napi_bytes += skb->len; in __free_old_xmit()
612 stats->packets++; in __free_old_xmit()
613 stats->bytes += skb->len; in __free_old_xmit()
620 stats->packets++; in __free_old_xmit()
621 stats->bytes += xdp_get_frame_len(frame); in __free_old_xmit()
626 stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr); in __free_old_xmit()
627 stats->xsk++; in __free_old_xmit()
631 netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes); in __free_old_xmit()
641 if (stats->xsk) in virtnet_free_old_xmit()
642 virtnet_xsk_completed(sq, stats->xsk); in virtnet_free_old_xmit()
645 /* Converting between virtqueue no. and kernel tx/rx queue no.
650 return (vq->index - 1) / 2; in vq2txq()
660 return vq->index / 2; in vq2rxq()
670 if (qid == vi->max_queue_pairs * 2) in vq_type()
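
The vq2txq()/vq2rxq() arithmetic above and the cvq check in vq_type() encode the conventional virtio-net virtqueue layout: receive queues sit at even indices, transmit queues at odd indices, and the control virtqueue (when present) comes last at index max_queue_pairs * 2. A minimal, self-contained model of that mapping, assuming exactly this layout:

#include <stdio.h>

/* Model of the index math shown above: rxN -> vq 2N, txN -> vq 2N + 1,
 * and the control vq (if any) occupies index max_queue_pairs * 2. */
static int vq2rxq(int vq_index) { return vq_index / 2; }
static int vq2txq(int vq_index) { return (vq_index - 1) / 2; }
static int is_ctrl_vq(int vq_index, int max_queue_pairs)
{
	return vq_index == max_queue_pairs * 2;
}

int main(void)
{
	int max_queue_pairs = 2; /* arbitrary example value */

	for (int vq = 0; vq <= max_queue_pairs * 2; vq++) {
		if (is_ctrl_vq(vq, max_queue_pairs))
			printf("vq %d -> ctrl\n", vq);
		else if (vq & 1)
			printf("vq %d -> tx%d\n", vq, vq2txq(vq));
		else
			printf("vq %d -> rx%d\n", vq, vq2rxq(vq));
	}
	return 0;
}
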
682 return (struct virtio_net_common_hdr *)skb->cb; in skb_vnet_common_hdr()
693 /* Find end of list, sew whole thing into vi->rq.pages. */ in give_pages()
694 for (end = page; end->private; end = (struct page *)end->private); in give_pages()
695 end->private = (unsigned long)rq->pages; in give_pages()
696 rq->pages = page; in give_pages()
701 struct page *p = rq->pages; in get_a_page()
704 rq->pages = (struct page *)p->private; in get_a_page()
706 p->private = 0; in get_a_page()
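
give_pages() and get_a_page() above thread big-packet pages into a singly linked free list through page->private. A small userspace model of the same push/pop discipline, with struct page reduced to the one field the excerpt uses:

#include <stddef.h>
#include <assert.h>

/* Stand-in for struct page: only the 'private' link used by the excerpt. */
struct page {
	unsigned long private;
};

/* Push a chain of pages (linked through ->private) onto the pool head. */
static void give_pages(struct page **pool, struct page *page)
{
	struct page *end;

	/* Find the end of the donated chain, then splice the old pool after it. */
	for (end = page; end->private; end = (struct page *)end->private)
		;
	end->private = (unsigned long)*pool;
	*pool = page;
}

/* Pop one page off the pool, clearing its link before handing it out. */
static struct page *get_a_page(struct page **pool)
{
	struct page *p = *pool;

	if (p) {
		*pool = (struct page *)p->private;
		p->private = 0;
	}
	return p;
}

int main(void)
{
	struct page a = {0}, b = {0}, *pool = NULL;

	give_pages(&pool, &a);
	give_pages(&pool, &b);
	assert(get_a_page(&pool) == &b);   /* LIFO order */
	assert(get_a_page(&pool) == &a);
	assert(get_a_page(&pool) == NULL);
	return 0;
}
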
715 if (vi->mergeable_rx_bufs) in virtnet_rq_free_buf()
717 else if (vi->big_packets) in virtnet_rq_free_buf()
725 spin_lock_bh(&vi->refill_lock); in enable_delayed_refill()
726 vi->refill_enabled = true; in enable_delayed_refill()
727 spin_unlock_bh(&vi->refill_lock); in enable_delayed_refill()
732 spin_lock_bh(&vi->refill_lock); in disable_delayed_refill()
733 vi->refill_enabled = false; in disable_delayed_refill()
734 spin_unlock_bh(&vi->refill_lock); in disable_delayed_refill()
740 vi->rx_mode_work_enabled = true; in enable_rx_mode_work()
747 vi->rx_mode_work_enabled = false; in disable_rx_mode_work()
780 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done()
781 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
786 if (napi->weight) in skb_xmit_done()
790 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
807 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); in mergeable_ctx_to_truesize()
820 if (len > truesize - room) { in check_mergeable_len()
822 dev->name, len, (unsigned long)(truesize - room)); in check_mergeable_len()
824 return -1; in check_mergeable_len()
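
mergeable_ctx_to_truesize() above recovers the buffer truesize from the low bits of an opaque context word; the headroom is conventionally kept in the bits above MRG_CTX_HEADER_SHIFT. A hedged userspace model of that packing (the shift value of 22 is an assumption taken from typical virtio_net builds, not from the excerpt):

#include <stdio.h>
#include <assert.h>

/* Assumed value; the excerpt only shows that the low bits hold truesize. */
#define MRG_CTX_HEADER_SHIFT 22

/* Pack headroom (high bits) and truesize (low bits) into one word. */
static void *mergeable_len_to_ctx(unsigned int truesize, unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned long mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

static unsigned long mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

int main(void)
{
	void *ctx = mergeable_len_to_ctx(1536, 256);

	assert(mergeable_ctx_to_truesize(ctx) == 1536);
	assert(mergeable_ctx_to_headroom(ctx) == 256);
	printf("truesize=%lu headroom=%lu\n",
	       mergeable_ctx_to_truesize(ctx), mergeable_ctx_to_headroom(ctx));
	return 0;
}
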
863 hdr_len = vi->hdr_len; in page_to_skb()
864 if (vi->mergeable_rx_bufs) in page_to_skb()
869 buf = p - headroom; in page_to_skb()
870 len -= hdr_len; in page_to_skb()
873 tailroom = truesize - headroom - hdr_padded_len - len; in page_to_skb()
878 skb = virtnet_build_skb(buf, truesize, p - buf, len); in page_to_skb()
882 page = (struct page *)page->private; in page_to_skb()
889 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); in page_to_skb()
893 /* Copy all frame if it fits skb->head, otherwise in page_to_skb()
902 len -= copy; in page_to_skb()
905 if (vi->mergeable_rx_bufs) { in page_to_skb()
915 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); in page_to_skb()
916 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, in page_to_skb()
918 len -= frag_size; in page_to_skb()
919 page = (struct page *)page->private; in page_to_skb()
937 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_unmap()
943 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_unmap()
949 --dma->ref; in virtnet_rq_unmap()
951 if (dma->need_sync && len) { in virtnet_rq_unmap()
952 offset = buf - (head + sizeof(*dma)); in virtnet_rq_unmap()
954 virtqueue_map_sync_single_range_for_cpu(rq->vq, dma->addr, in virtnet_rq_unmap()
959 if (dma->ref) in virtnet_rq_unmap()
962 virtqueue_unmap_single_attrs(rq->vq, dma->addr, dma->len, in virtnet_rq_unmap()
969 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_get_buf()
972 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_get_buf()
974 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx); in virtnet_rq_get_buf()
983 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_init_one_sg()
989 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_init_one_sg()
991 head = page_address(rq->alloc_frag.page); in virtnet_rq_init_one_sg()
993 offset = buf - head; in virtnet_rq_init_one_sg()
997 addr = dma->addr - sizeof(*dma) + offset; in virtnet_rq_init_one_sg()
999 sg_init_table(rq->sg, 1); in virtnet_rq_init_one_sg()
1000 sg_fill_dma(rq->sg, addr, len); in virtnet_rq_init_one_sg()
1005 struct page_frag *alloc_frag = &rq->alloc_frag; in virtnet_rq_alloc()
1006 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_alloc()
1011 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_alloc()
1013 head = page_address(alloc_frag->page); in virtnet_rq_alloc()
1018 if (!alloc_frag->offset) { in virtnet_rq_alloc()
1019 if (rq->last_dma) { in virtnet_rq_alloc()
1024 virtnet_rq_unmap(rq, rq->last_dma, 0); in virtnet_rq_alloc()
1025 rq->last_dma = NULL; in virtnet_rq_alloc()
1028 dma->len = alloc_frag->size - sizeof(*dma); in virtnet_rq_alloc()
1030 addr = virtqueue_map_single_attrs(rq->vq, dma + 1, in virtnet_rq_alloc()
1031 dma->len, DMA_FROM_DEVICE, 0); in virtnet_rq_alloc()
1032 if (virtqueue_map_mapping_error(rq->vq, addr)) in virtnet_rq_alloc()
1035 dma->addr = addr; in virtnet_rq_alloc()
1036 dma->need_sync = virtqueue_map_need_sync(rq->vq, addr); in virtnet_rq_alloc()
1042 get_page(alloc_frag->page); in virtnet_rq_alloc()
1043 dma->ref = 1; in virtnet_rq_alloc()
1044 alloc_frag->offset = sizeof(*dma); in virtnet_rq_alloc()
1046 rq->last_dma = dma; in virtnet_rq_alloc()
1049 ++dma->ref; in virtnet_rq_alloc()
1051 buf = head + alloc_frag->offset; in virtnet_rq_alloc()
1053 get_page(alloc_frag->page); in virtnet_rq_alloc()
1054 alloc_frag->offset += size; in virtnet_rq_alloc()
1061 struct virtnet_info *vi = vq->vdev->priv; in virtnet_rq_unmap_free_buf()
1065 rq = &vi->rq[i]; in virtnet_rq_unmap_free_buf()
1067 if (rq->xsk_pool) { in virtnet_rq_unmap_free_buf()
1072 if (!vi->big_packets || vi->mergeable_rx_bufs) in virtnet_rq_unmap_free_buf()
1091 u64_stats_update_begin(&sq->stats.syncp); in free_old_xmit()
1092 u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes); in free_old_xmit()
1093 u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets); in free_old_xmit()
1094 u64_stats_update_end(&sq->stats.syncp); in free_old_xmit()
1099 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) in is_xdp_raw_buffer_queue()
1101 else if (q < vi->curr_queue_pairs) in is_xdp_raw_buffer_queue()
1113 qnum = sq - vi->sq; in tx_may_stop()
1120 * maintaining the TX queue stop/start state properly, and causes in tx_may_stop()
1121 * the stack to do a non-trivial amount of useless work. in tx_may_stop()
1125 if (sq->vq->num_free < MAX_SKB_FRAGS + 2) { in tx_may_stop()
1129 u64_stats_update_begin(&sq->stats.syncp); in tx_may_stop()
1130 u64_stats_inc(&sq->stats.stop); in tx_may_stop()
1131 u64_stats_update_end(&sq->stats.syncp); in tx_may_stop()
1143 bool use_napi = sq->napi.weight; in check_sq_full_and_disable()
1146 qnum = sq - vi->sq; in check_sq_full_and_disable()
1152 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) in check_sq_full_and_disable()
1153 virtqueue_napi_schedule(&sq->napi, sq->vq); in check_sq_full_and_disable()
1154 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { in check_sq_full_and_disable()
1157 if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) { in check_sq_full_and_disable()
1159 u64_stats_update_begin(&sq->stats.syncp); in check_sq_full_and_disable()
1160 u64_stats_inc(&sq->stats.wake); in check_sq_full_and_disable()
1161 u64_stats_update_end(&sq->stats.syncp); in check_sq_full_and_disable()
1162 virtqueue_disable_cb(sq->vq); in check_sq_full_and_disable()
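
tx_may_stop() and check_sq_full_and_disable() above implement the usual hysteresis: stop the subqueue once fewer than MAX_SKB_FRAGS + 2 descriptors remain (worst case per skb: all fragments plus the linear part plus the virtio header, as the earlier "TX: fragments + linear part + virtio header" comment notes), and wake it again only after completions have freed at least that many. A minimal model of the policy, with the threshold named as in the excerpt (the MAX_SKB_FRAGS value of 17 is an assumption):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS 17 /* typical kernel value; an assumption here */

struct model_sq {
	int num_free;   /* free descriptors in the tx virtqueue */
	bool stopped;   /* netif subqueue state                 */
};

/* Called after queuing an skb: stop if the worst-case next skb can't fit. */
static void tx_may_stop(struct model_sq *sq)
{
	if (sq->num_free < MAX_SKB_FRAGS + 2)
		sq->stopped = true;
}

/* Called from the completion path: wake once enough space is reclaimed. */
static void tx_completion(struct model_sq *sq, int freed)
{
	sq->num_free += freed;
	if (sq->stopped && sq->num_free >= MAX_SKB_FRAGS + 2)
		sq->stopped = false;
}

int main(void)
{
	struct model_sq sq = { .num_free = 20, .stopped = false };

	sq.num_free -= 3;        /* transmit consumes descriptors */
	tx_may_stop(&sq);        /* 17 < 19 -> queue stops        */
	printf("stopped=%d\n", sq.stopped);

	tx_completion(&sq, 3);   /* 20 >= 19 -> queue wakes       */
	printf("stopped=%d\n", sq.stopped);
	return 0;
}
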
1180 * hard_start + XDP_PACKET_HEADROOM - vi->hdr_len in buf_to_xdp()
1186 * xsk_pool_get_rx_frame_size() + vi->hdr_len in buf_to_xdp()
1188 bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool); in buf_to_xdp()
1190 bufsize += vi->hdr_len; in buf_to_xdp()
1194 vi->dev->name, len, bufsize); in buf_to_xdp()
1195 DEV_STATS_INC(vi->dev, rx_length_errors); in buf_to_xdp()
1203 xdp_prepare_buff(xdp, xdp->data_hard_start, in buf_to_xdp()
1204 XDP_PACKET_HEADROOM - vi->hdr_len, len, 1); in buf_to_xdp()
1205 xdp->flags = 0; in buf_to_xdp()
1216 unsigned int metasize = xdp->data - xdp->data_meta; in xsk_construct_skb()
1220 size = xdp->data_end - xdp->data_hard_start; in xsk_construct_skb()
1221 skb = napi_alloc_skb(&rq->napi, size); in xsk_construct_skb()
1227 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); in xsk_construct_skb()
1229 size = xdp->data_end - xdp->data_meta; in xsk_construct_skb()
1230 memcpy(__skb_put(skb, size), xdp->data_meta, size); in xsk_construct_skb()
1252 prog = rcu_dereference(rq->xdp_prog); in virtnet_receive_xsk_small()
1268 u64_stats_inc(&stats->drops); in virtnet_receive_xsk_small()
1281 while (num_buf-- > 1) { in xsk_drop_follow_bufs()
1282 xdp = virtqueue_get_buf(rq->vq, &len); in xsk_drop_follow_bufs()
1285 dev->name, num_buf); in xsk_drop_follow_bufs()
1289 u64_stats_add(&stats->bytes, len); in xsk_drop_follow_bufs()
1309 while (--num_buf) { in xsk_append_merge_buffer()
1310 buf = virtqueue_get_buf(rq->vq, &len); in xsk_append_merge_buffer()
1313 vi->dev->name, num_buf, in xsk_append_merge_buffer()
1314 virtio16_to_cpu(vi->vdev, in xsk_append_merge_buffer()
1315 hdr->num_buffers)); in xsk_append_merge_buffer()
1316 DEV_STATS_INC(vi->dev, rx_length_errors); in xsk_append_merge_buffer()
1317 return -EINVAL; in xsk_append_merge_buffer()
1320 u64_stats_add(&stats->bytes, len); in xsk_append_merge_buffer()
1332 memcpy(buf, xdp->data, len); in xsk_append_merge_buffer()
1351 xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats); in xsk_append_merge_buffer()
1352 return -EINVAL; in xsk_append_merge_buffer()
1365 hdr = xdp->data - vi->hdr_len; in virtnet_receive_xsk_merge()
1366 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in virtnet_receive_xsk_merge()
1370 prog = rcu_dereference(rq->xdp_prog); in virtnet_receive_xsk_merge()
1407 u64_stats_inc(&stats->drops); in virtnet_receive_xsk_merge()
1416 struct net_device *dev = vi->dev; in virtnet_receive_xsk_buf()
1421 len -= vi->hdr_len; in virtnet_receive_xsk_buf()
1423 u64_stats_add(&stats->bytes, len); in virtnet_receive_xsk_buf()
1430 pr_debug("%s: short packet %i\n", dev->name, len); in virtnet_receive_xsk_buf()
1436 flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags; in virtnet_receive_xsk_buf()
1438 if (!vi->mergeable_rx_bufs) in virtnet_receive_xsk_buf()
1456 xsk_buffs = rq->xsk_buffs; in virtnet_add_recvbuf_xsk()
1458 num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free); in virtnet_add_recvbuf_xsk()
1460 return -ENOMEM; in virtnet_add_recvbuf_xsk()
1462 len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len; in virtnet_add_recvbuf_xsk()
1466 * We assume XDP_PACKET_HEADROOM is larger than hdr->len. in virtnet_add_recvbuf_xsk()
1469 addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len; in virtnet_add_recvbuf_xsk()
1471 sg_init_table(rq->sg, 1); in virtnet_add_recvbuf_xsk()
1472 sg_fill_dma(rq->sg, addr, len); in virtnet_add_recvbuf_xsk()
1474 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, in virtnet_add_recvbuf_xsk()
1505 vi = sq->vq->vdev->priv; in virtnet_xsk_xmit_one()
1507 addr = xsk_buff_raw_get_dma(pool, desc->addr); in virtnet_xsk_xmit_one()
1508 xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len); in virtnet_xsk_xmit_one()
1510 sg_init_table(sq->sg, 2); in virtnet_xsk_xmit_one()
1511 sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len); in virtnet_xsk_xmit_one()
1512 sg_fill_dma(sq->sg + 1, addr, desc->len); in virtnet_xsk_xmit_one()
1514 return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2, in virtnet_xsk_xmit_one()
1515 virtnet_xsk_to_ptr(desc->len), in virtnet_xsk_xmit_one()
1524 struct xdp_desc *descs = pool->tx_descs; in virtnet_xsk_xmit_batch()
1529 budget = min_t(u32, budget, sq->vq->num_free); in virtnet_xsk_xmit_batch()
1538 xsk_tx_completed(sq->xsk_pool, nb_pkts - i); in virtnet_xsk_xmit_batch()
1545 if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xsk_xmit_batch()
1554 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_xsk_xmit()
1556 struct net_device *dev = vi->dev; in virtnet_xsk_xmit()
1563 __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats); in virtnet_xsk_xmit()
1566 xsk_tx_completed(sq->xsk_pool, stats.xsk); in virtnet_xsk_xmit()
1570 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xsk_xmit()
1571 check_sq_full_and_disable(vi, vi->dev, sq); in virtnet_xsk_xmit()
1576 txq = netdev_get_tx_queue(vi->dev, sq - vi->sq); in virtnet_xsk_xmit()
1580 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xsk_xmit()
1581 u64_stats_add(&sq->stats.packets, stats.packets); in virtnet_xsk_xmit()
1582 u64_stats_add(&sq->stats.bytes, stats.bytes); in virtnet_xsk_xmit()
1583 u64_stats_add(&sq->stats.kicks, kicks); in virtnet_xsk_xmit()
1584 u64_stats_add(&sq->stats.xdp_tx, sent); in virtnet_xsk_xmit()
1585 u64_stats_update_end(&sq->stats.syncp); in virtnet_xsk_xmit()
1595 if (napi_if_scheduled_mark_missed(&sq->napi)) in xsk_wakeup()
1599 virtqueue_napi_schedule(&sq->napi, sq->vq); in xsk_wakeup()
1609 return -ENETDOWN; in virtnet_xsk_wakeup()
1611 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_wakeup()
1612 return -EINVAL; in virtnet_xsk_wakeup()
1614 sq = &vi->sq[qid]; in virtnet_xsk_wakeup()
1622 xsk_tx_completed(sq->xsk_pool, num); in virtnet_xsk_completed()
1625 * wakeup the tx napi to consume the xsk tx queue, because the tx in virtnet_xsk_completed()
1640 if (unlikely(xdpf->headroom < vi->hdr_len)) in __virtnet_xdp_xmit_one()
1641 return -EOVERFLOW; in __virtnet_xdp_xmit_one()
1645 nr_frags = shinfo->nr_frags; in __virtnet_xdp_xmit_one()
1651 * xdp_return_frame(), which will involve xdpf->data and in __virtnet_xdp_xmit_one()
1652 * xdpf->headroom. Therefore, we need to update the value of in __virtnet_xdp_xmit_one()
1655 xdpf->headroom -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1656 xdpf->data -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1657 /* Zero header and leave csum up to XDP layers */ in __virtnet_xdp_xmit_one()
1658 hdr = xdpf->data; in __virtnet_xdp_xmit_one()
1659 memset(hdr, 0, vi->hdr_len); in __virtnet_xdp_xmit_one()
1660 xdpf->len += vi->hdr_len; in __virtnet_xdp_xmit_one()
1662 sg_init_table(sq->sg, nr_frags + 1); in __virtnet_xdp_xmit_one()
1663 sg_set_buf(sq->sg, xdpf->data, xdpf->len); in __virtnet_xdp_xmit_one()
1665 skb_frag_t *frag = &shinfo->frags[i]; in __virtnet_xdp_xmit_one()
1667 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag), in __virtnet_xdp_xmit_one()
1673 return -ENOSPC; /* Caller handle free/refcnt */ in __virtnet_xdp_xmit_one()
1678 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1692 if (v->curr_queue_pairs > nr_cpu_ids) { \
1693 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
1695 txq = netdev_get_tx_queue(v->dev, qp); \
1698 qp = cpu % v->curr_queue_pairs; \
1699 txq = netdev_get_tx_queue(v->dev, qp); \
1702 v->sq + qp; \
1709 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
1710 if (v->curr_queue_pairs > nr_cpu_ids) \
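
The backslash-continued macro bodies excerpted above pick a send queue for XDP transmit: when there are more queue pairs than CPUs, each CPU gets a dedicated XDP queue placed after the ordinary ones and no tx lock is needed; otherwise queues are shared per cpu % curr_queue_pairs under __netif_tx_lock(). A hedged model of that selection (the "+ cpu" offset on the dedicated-queue path is not visible in the excerpt and is an assumption):

#include <stdbool.h>
#include <stdio.h>

/* Returns the send-queue index an XDP transmit on 'cpu' would use, and
 * whether the caller must take the tx lock because the queue is shared. */
static unsigned int pick_xdp_txq(unsigned int cpu,
				 unsigned int curr_queue_pairs,
				 unsigned int xdp_queue_pairs,
				 unsigned int nr_cpu_ids,
				 bool *needs_tx_lock)
{
	if (curr_queue_pairs > nr_cpu_ids) {
		/* Dedicated per-CPU XDP queues live after the normal ones. */
		*needs_tx_lock = false;
		return curr_queue_pairs - xdp_queue_pairs + cpu; /* offset assumed */
	}
	/* Not enough queues for every CPU: share them under the tx lock. */
	*needs_tx_lock = true;
	return cpu % curr_queue_pairs;
}

int main(void)
{
	bool lock;
	unsigned int q = pick_xdp_txq(1, 8, 4, 4, &lock);

	printf("qp=%u needs_tx_lock=%d\n", q, lock); /* qp=5, no lock */
	q = pick_xdp_txq(5, 4, 0, 8, &lock);
	printf("qp=%u needs_tx_lock=%d\n", q, lock); /* qp=1, lock    */
	return 0;
}
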
1721 struct receive_queue *rq = vi->rq; in virtnet_xdp_xmit()
1732 xdp_prog = rcu_access_pointer(rq->xdp_prog); in virtnet_xdp_xmit()
1734 return -ENXIO; in virtnet_xdp_xmit()
1739 ret = -EINVAL; in virtnet_xdp_xmit()
1744 virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), in virtnet_xdp_xmit()
1756 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xdp_xmit()
1760 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xdp_xmit()
1764 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xdp_xmit()
1765 u64_stats_add(&sq->stats.bytes, stats.bytes); in virtnet_xdp_xmit()
1766 u64_stats_add(&sq->stats.packets, stats.packets); in virtnet_xdp_xmit()
1767 u64_stats_add(&sq->stats.xdp_tx, n); in virtnet_xdp_xmit()
1768 u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit); in virtnet_xdp_xmit()
1769 u64_stats_add(&sq->stats.kicks, kicks); in virtnet_xdp_xmit()
1770 u64_stats_update_end(&sq->stats.syncp); in virtnet_xdp_xmit()
1784 for (i = 0; i < shinfo->nr_frags; i++) { in put_xdp_frags()
1785 xdp_page = skb_frag_page(&shinfo->frags[i]); in put_xdp_frags()
1801 u64_stats_inc(&stats->xdp_packets); in virtnet_xdp_handler()
1808 u64_stats_inc(&stats->xdp_tx); in virtnet_xdp_handler()
1826 u64_stats_inc(&stats->xdp_redirects); in virtnet_xdp_handler()
1847 return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0; in virtnet_get_headroom()
1857 * with large buffers with sufficient headroom - so it should affect
1888 while (--*num_buf) { in xdp_linearize_page()
1899 off = buf - page_address(p); in xdp_linearize_page()
1921 *len = page_off - XDP_PACKET_HEADROOM; in xdp_linearize_page()
1939 headroom = vi->hdr_len + header_offset; in receive_small_build_skb()
1948 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); in receive_small_build_skb()
1964 unsigned int headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1974 if (unlikely(hdr->hdr.gso_type)) in receive_small_xdp()
1978 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) in receive_small_xdp()
1985 int offset = buf - page_address(page) + header_offset; in receive_small_xdp()
1986 unsigned int tlen = len + vi->hdr_len; in receive_small_xdp()
1991 headroom = vi->hdr_len + header_offset; in receive_small_xdp()
2005 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq); in receive_small_xdp()
2006 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, in receive_small_xdp()
2014 len = xdp.data_end - xdp.data; in receive_small_xdp()
2015 metasize = xdp.data - xdp.data_meta; in receive_small_xdp()
2026 skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len); in receive_small_xdp()
2036 u64_stats_inc(&stats->xdp_drops); in receive_small_xdp()
2038 u64_stats_inc(&stats->drops); in receive_small_xdp()
2056 /* We passed the address of virtnet header to virtio-core, in receive_small()
2059 buf -= VIRTNET_RX_PAD + xdp_headroom; in receive_small()
2061 len -= vi->hdr_len; in receive_small()
2062 u64_stats_add(&stats->bytes, len); in receive_small()
2066 dev->name, len, GOOD_PACKET_LEN); in receive_small()
2071 if (unlikely(vi->xdp_enabled)) { in receive_small()
2075 xdp_prog = rcu_dereference(rq->xdp_prog); in receive_small()
2091 u64_stats_inc(&stats->drops); in receive_small()
2109 if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) { in receive_big()
2111 dev->name, len, in receive_big()
2112 (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE); in receive_big()
2117 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_big()
2124 u64_stats_inc(&stats->drops); in receive_big()
2137 while (num_buf-- > 1) { in mergeable_buf_free()
2141 dev->name, num_buf); in mergeable_buf_free()
2145 u64_stats_add(&stats->bytes, len); in mergeable_buf_free()
2153 * virtio-net there are 2 points that do not match its requirements:
2156 * like eth_type_trans() (which virtio-net does in receive_buf()).
2169 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { in build_skb_from_xdp_buff()
2175 nr_frags = sinfo->nr_frags; in build_skb_from_xdp_buff()
2177 skb = build_skb(xdp->data_hard_start, xdp->frame_sz); in build_skb_from_xdp_buff()
2181 headroom = xdp->data - xdp->data_hard_start; in build_skb_from_xdp_buff()
2182 data_len = xdp->data_end - xdp->data; in build_skb_from_xdp_buff()
2186 metasize = xdp->data - xdp->data_meta; in build_skb_from_xdp_buff()
2192 xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size, in build_skb_from_xdp_buff()
2220 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); in virtnet_build_xdp_buff_mrg()
2221 xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM, in virtnet_build_xdp_buff_mrg()
2222 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); in virtnet_build_xdp_buff_mrg()
2228 /* If we want to build multi-buffer xdp, we need in virtnet_build_xdp_buff_mrg()
2236 shinfo->nr_frags = 0; in virtnet_build_xdp_buff_mrg()
2237 shinfo->xdp_frags_size = 0; in virtnet_build_xdp_buff_mrg()
2241 return -EINVAL; in virtnet_build_xdp_buff_mrg()
2243 while (--*num_buf > 0) { in virtnet_build_xdp_buff_mrg()
2247 dev->name, *num_buf, in virtnet_build_xdp_buff_mrg()
2248 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); in virtnet_build_xdp_buff_mrg()
2253 u64_stats_add(&stats->bytes, len); in virtnet_build_xdp_buff_mrg()
2255 offset = buf - page_address(page); in virtnet_build_xdp_buff_mrg()
2265 frag = &shinfo->frags[shinfo->nr_frags++]; in virtnet_build_xdp_buff_mrg()
2270 shinfo->xdp_frags_size += len; in virtnet_build_xdp_buff_mrg()
2278 return -EINVAL; in virtnet_build_xdp_buff_mrg()
2298 * in-flight packets from before XDP was enabled reach in mergeable_xdp_get_buf()
2301 if (unlikely(hdr->hdr.gso_type)) in mergeable_xdp_get_buf()
2305 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) in mergeable_xdp_get_buf()
2316 (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) { in mergeable_xdp_get_buf()
2328 if (!xdp_prog->aux->xdp_has_frags) { in mergeable_xdp_get_buf()
2330 xdp_page = xdp_linearize_page(vi->dev, rq, num_buf, in mergeable_xdp_get_buf()
2370 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable_xdp()
2372 int offset = buf - page_address(page); in receive_mergeable_xdp()
2414 u64_stats_inc(&stats->xdp_drops); in receive_mergeable_xdp()
2415 u64_stats_inc(&stats->drops); in receive_mergeable_xdp()
2427 num_skb_frags = skb_shinfo(curr_skb)->nr_frags; in virtnet_skb_append_frag()
2435 skb_shinfo(curr_skb)->frag_list = nskb; in virtnet_skb_append_frag()
2437 curr_skb->next = nskb; in virtnet_skb_append_frag()
2439 head_skb->truesize += nskb->truesize; in virtnet_skb_append_frag()
2444 head_skb->data_len += len; in virtnet_skb_append_frag()
2445 head_skb->len += len; in virtnet_skb_append_frag()
2446 head_skb->truesize += truesize; in virtnet_skb_append_frag()
2449 offset = buf - page_address(page); in virtnet_skb_append_frag()
2452 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, in virtnet_skb_append_frag()
2472 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
2474 int offset = buf - page_address(page); in receive_mergeable()
2480 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_mergeable()
2485 if (unlikely(vi->xdp_enabled)) { in receive_mergeable()
2489 xdp_prog = rcu_dereference(rq->xdp_prog); in receive_mergeable()
2504 while (--num_buf) { in receive_mergeable()
2508 dev->name, num_buf, in receive_mergeable()
2509 virtio16_to_cpu(vi->vdev, in receive_mergeable()
2510 hdr->num_buffers)); in receive_mergeable()
2515 u64_stats_add(&stats->bytes, len); in receive_mergeable()
2528 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); in receive_mergeable()
2536 u64_stats_inc(&stats->drops); in receive_mergeable()
2544 return __le16_to_cpu(hdr_hash->hash_value_lo) | in virtio_net_hash_value()
2545 (__le16_to_cpu(hdr_hash->hash_value_hi) << 16); in virtio_net_hash_value()
2556 switch (__le16_to_cpu(hdr_hash->hash_report)) { in virtio_skb_set_hash()
2581 struct net_device *dev = vi->dev; in virtnet_receive_done()
2584 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) in virtnet_receive_done()
2585 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); in virtnet_receive_done()
2587 hdr->hdr.flags = flags; in virtnet_receive_done()
2588 if (virtio_net_handle_csum_offload(skb, &hdr->hdr, vi->rx_tnl_csum)) { in virtnet_receive_done()
2589 net_warn_ratelimited("%s: bad csum: flags: %x, gso_type: %x rx_tnl_csum %d\n", in virtnet_receive_done()
2590 dev->name, hdr->hdr.flags, in virtnet_receive_done()
2591 hdr->hdr.gso_type, vi->rx_tnl_csum); in virtnet_receive_done()
2595 if (virtio_net_hdr_tnl_to_skb(skb, &hdr->tnl_hdr, vi->rx_tnl, in virtnet_receive_done()
2596 vi->rx_tnl_csum, in virtnet_receive_done()
2597 virtio_is_little_endian(vi->vdev))) { in virtnet_receive_done()
2598 net_warn_ratelimited("%s: bad gso: type: %x, size: %u, flags %x tunnel %d tnl csum %d\n", in virtnet_receive_done()
2599 dev->name, hdr->hdr.gso_type, in virtnet_receive_done()
2600 hdr->hdr.gso_size, hdr->hdr.flags, in virtnet_receive_done()
2601 vi->rx_tnl, vi->rx_tnl_csum); in virtnet_receive_done()
2605 skb_record_rx_queue(skb, vq2rxq(rq->vq)); in virtnet_receive_done()
2606 skb->protocol = eth_type_trans(skb, dev); in virtnet_receive_done()
2608 ntohs(skb->protocol), skb->len, skb->pkt_type); in virtnet_receive_done()
2610 napi_gro_receive(&rq->napi, skb); in virtnet_receive_done()
2623 struct net_device *dev = vi->dev; in receive_buf()
2627 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
2628 pr_debug("%s: short packet %i\n", dev->name, len); in receive_buf()
2641 flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; in receive_buf()
2643 if (vi->mergeable_rx_bufs) in receive_buf()
2646 else if (vi->big_packets) in receive_buf()
2659 * not need to use mergeable_len_to_ctx here - it is enough
2668 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; in add_recvbuf_small()
2674 if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp))) in add_recvbuf_small()
2675 return -ENOMEM; in add_recvbuf_small()
2679 return -ENOMEM; in add_recvbuf_small()
2683 virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN); in add_recvbuf_small()
2685 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp); in add_recvbuf_small()
2701 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); in add_recvbuf_big()
2703 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ in add_recvbuf_big()
2704 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { in add_recvbuf_big()
2709 return -ENOMEM; in add_recvbuf_big()
2711 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); in add_recvbuf_big()
2714 first->private = (unsigned long)list; in add_recvbuf_big()
2721 return -ENOMEM; in add_recvbuf_big()
2725 /* rq->sg[0], rq->sg[1] share the same page */ in add_recvbuf_big()
2726 /* a separated rq->sg[0] for header - required in case !any_header_sg */ in add_recvbuf_big()
2727 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
2729 /* rq->sg[1] for data packet, from offset */ in add_recvbuf_big()
2731 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); in add_recvbuf_big()
2734 first->private = (unsigned long)list; in add_recvbuf_big()
2735 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, in add_recvbuf_big()
2747 struct virtnet_info *vi = rq->vq->vdev->priv; in get_mergeable_buf_len()
2748 const size_t hdr_len = vi->hdr_len; in get_mergeable_buf_len()
2752 return PAGE_SIZE - room; in get_mergeable_buf_len()
2755 rq->min_buf_len, PAGE_SIZE - hdr_len); in get_mergeable_buf_len()
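
get_mergeable_buf_len() above sizes the next mergeable receive buffer from a running average of received packet lengths (fed by ewma_pkt_len_add() elsewhere in the listing), clamped between rq->min_buf_len and PAGE_SIZE - hdr_len; when XDP headroom is reserved it simply uses PAGE_SIZE - room. A rough userspace model, assuming the driver's usual DECLARE_EWMA(pkt_len, 0, 64) weighting (roughly new = old + (sample - old)/64) and cache-line alignment of the result:

#include <stdio.h>

#define PAGE_SIZE      4096u
#define L1_CACHE_BYTES   64u  /* assumed cache-line size             */
#define EWMA_WEIGHT      64u  /* assumed DECLARE_EWMA(pkt_len, 0, 64) */

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Approximate EWMA update with weight 1/64. */
static void ewma_pkt_len_add(unsigned int *avg, unsigned int sample)
{
	*avg = *avg ? *avg + ((int)sample - (int)*avg) / (int)EWMA_WEIGHT : sample;
}

/* Sketch of the sizing policy shown in the excerpt. */
static unsigned int get_mergeable_buf_len(unsigned int avg_pkt_len,
					  unsigned int min_buf_len,
					  unsigned int hdr_len,
					  unsigned int room)
{
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;   /* XDP case: leave the headroom */

	len = hdr_len + clamp_u(avg_pkt_len, min_buf_len, PAGE_SIZE - hdr_len);
	return (len + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1); /* ALIGN(), assumed */
}

int main(void)
{
	unsigned int avg = 0;

	ewma_pkt_len_add(&avg, 1514);
	ewma_pkt_len_add(&avg, 64);
	printf("avg=%u buf=%u\n", avg,
	       get_mergeable_buf_len(avg, 128, 12, 0));
	return 0;
}
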
2763 struct page_frag *alloc_frag = &rq->alloc_frag; in add_recvbuf_mergeable()
2776 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); in add_recvbuf_mergeable()
2779 return -ENOMEM; in add_recvbuf_mergeable()
2781 if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size) in add_recvbuf_mergeable()
2782 len -= sizeof(struct virtnet_rq_dma); in add_recvbuf_mergeable()
2786 return -ENOMEM; in add_recvbuf_mergeable()
2789 hole = alloc_frag->size - alloc_frag->offset; in add_recvbuf_mergeable()
2799 alloc_frag->offset += hole; in add_recvbuf_mergeable()
2805 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp); in add_recvbuf_mergeable()
2826 if (rq->xsk_pool) { in try_fill_recv()
2827 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp); in try_fill_recv()
2832 if (vi->mergeable_rx_bufs) in try_fill_recv()
2834 else if (vi->big_packets) in try_fill_recv()
2841 } while (rq->vq->num_free); in try_fill_recv()
2844 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { in try_fill_recv()
2847 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); in try_fill_recv()
2848 u64_stats_inc(&rq->stats.kicks); in try_fill_recv()
2849 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); in try_fill_recv()
2852 return err != -ENOMEM; in try_fill_recv()
2857 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done()
2858 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
2860 rq->calls++; in skb_recv_done()
2861 virtqueue_napi_schedule(&rq->napi, rvq); in skb_recv_done()
2880 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_napi_enable()
2881 int qidx = vq2rxq(rq->vq); in virtnet_napi_enable()
2883 virtnet_napi_do_enable(rq->vq, &rq->napi); in virtnet_napi_enable()
2884 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi); in virtnet_napi_enable()
2889 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_napi_tx_enable()
2890 struct napi_struct *napi = &sq->napi; in virtnet_napi_tx_enable()
2891 int qidx = vq2txq(sq->vq); in virtnet_napi_tx_enable()
2893 if (!napi->weight) in virtnet_napi_tx_enable()
2896 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only in virtnet_napi_tx_enable()
2899 if (!vi->affinity_hint_set) { in virtnet_napi_tx_enable()
2900 napi->weight = 0; in virtnet_napi_tx_enable()
2904 virtnet_napi_do_enable(sq->vq, napi); in virtnet_napi_tx_enable()
2905 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi); in virtnet_napi_tx_enable()
2910 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_napi_tx_disable()
2911 struct napi_struct *napi = &sq->napi; in virtnet_napi_tx_disable()
2912 int qidx = vq2txq(sq->vq); in virtnet_napi_tx_disable()
2914 if (napi->weight) { in virtnet_napi_tx_disable()
2915 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL); in virtnet_napi_tx_disable()
2922 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_napi_disable()
2923 struct napi_struct *napi = &rq->napi; in virtnet_napi_disable()
2924 int qidx = vq2rxq(rq->vq); in virtnet_napi_disable()
2926 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL); in virtnet_napi_disable()
2937 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
2938 struct receive_queue *rq = &vi->rq[i]; in refill_work()
2946 * - cancel refill_work with cancel_delayed_work (note: in refill_work()
2947 * non-sync) in refill_work()
2948 * - cancel refill_work with cancel_delayed_work_sync in in refill_work()
2950 * - wrap all of the work in a lock (perhaps the netdev in refill_work()
2952 * - check netif_running() and return early to avoid a race in refill_work()
2954 napi_disable(&rq->napi); in refill_work()
2956 virtnet_napi_do_enable(rq->vq, &rq->napi); in refill_work()
2962 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
2977 buf = virtqueue_get_buf(rq->vq, &len); in virtnet_receive_xsk_bufs()
2998 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_receive_packets()
3007 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { in virtnet_receive_packets()
3019 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive()
3023 if (rq->xsk_pool) in virtnet_receive()
3028 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { in virtnet_receive()
3030 spin_lock(&vi->refill_lock); in virtnet_receive()
3031 if (vi->refill_enabled) in virtnet_receive()
3032 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
3033 spin_unlock(&vi->refill_lock); in virtnet_receive()
3038 u64_stats_update_begin(&rq->stats.syncp); in virtnet_receive()
3043 item = (u64_stats_t *)((u8 *)&rq->stats + offset); in virtnet_receive()
3048 u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets)); in virtnet_receive()
3049 u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes)); in virtnet_receive()
3051 u64_stats_update_end(&rq->stats.syncp); in virtnet_receive()
3058 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll_cleantx()
3059 unsigned int index = vq2rxq(rq->vq); in virtnet_poll_cleantx()
3060 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx()
3061 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_cleantx()
3063 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
3067 if (sq->reset) { in virtnet_poll_cleantx()
3073 virtqueue_disable_cb(sq->vq); in virtnet_poll_cleantx()
3075 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in virtnet_poll_cleantx()
3077 if (sq->vq->num_free >= MAX_SKB_FRAGS + 2 && in virtnet_poll_cleantx()
3079 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll_cleantx()
3080 u64_stats_inc(&sq->stats.wake); in virtnet_poll_cleantx()
3081 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll_cleantx()
3093 if (!rq->packets_in_napi) in virtnet_rx_dim_update()
3099 dim_update_sample(rq->calls, in virtnet_rx_dim_update()
3100 u64_stats_read(&rq->stats.packets), in virtnet_rx_dim_update()
3101 u64_stats_read(&rq->stats.bytes), in virtnet_rx_dim_update()
3104 net_dim(&rq->dim, &cur_sample); in virtnet_rx_dim_update()
3105 rq->packets_in_napi = 0; in virtnet_rx_dim_update()
3112 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll()
3121 rq->packets_in_napi += received; in virtnet_poll()
3128 napi_complete = virtqueue_napi_complete(napi, rq->vq, received); in virtnet_poll()
3133 if (napi_complete && rq->dim_enabled) in virtnet_poll()
3139 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in virtnet_poll()
3140 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll()
3141 u64_stats_inc(&sq->stats.kicks); in virtnet_poll()
3142 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll()
3152 virtnet_napi_tx_disable(&vi->sq[qp_index]); in virtnet_disable_queue_pair()
3153 virtnet_napi_disable(&vi->rq[qp_index]); in virtnet_disable_queue_pair()
3154 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_disable_queue_pair()
3159 struct net_device *dev = vi->dev; in virtnet_enable_queue_pair()
3162 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, in virtnet_enable_queue_pair()
3163 vi->rq[qp_index].napi.napi_id); in virtnet_enable_queue_pair()
3167 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, in virtnet_enable_queue_pair()
3172 virtnet_napi_enable(&vi->rq[qp_index]); in virtnet_enable_queue_pair()
3173 virtnet_napi_tx_enable(&vi->sq[qp_index]); in virtnet_enable_queue_pair()
3178 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_enable_queue_pair()
3184 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_cancel_dim()
3194 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) in virtnet_update_settings()
3197 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); in virtnet_update_settings()
3200 vi->speed = speed; in virtnet_update_settings()
3202 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); in virtnet_update_settings()
3205 vi->duplex = duplex; in virtnet_update_settings()
3215 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
3216 if (i < vi->curr_queue_pairs) in virtnet_open()
3218 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
3219 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
3226 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_open()
3227 if (vi->status & VIRTIO_NET_S_LINK_UP) in virtnet_open()
3228 netif_carrier_on(vi->dev); in virtnet_open()
3229 virtio_config_driver_enable(vi->vdev); in virtnet_open()
3231 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_open()
3239 cancel_delayed_work_sync(&vi->refill); in virtnet_open()
3241 for (i--; i >= 0; i--) { in virtnet_open()
3243 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_open()
3252 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx()
3253 unsigned int index = vq2txq(sq->vq); in virtnet_poll_tx()
3264 txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_tx()
3266 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3268 if (sq->xsk_pool) in virtnet_poll_tx()
3269 xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget); in virtnet_poll_tx()
3273 if (sq->vq->num_free >= MAX_SKB_FRAGS + 2 && in virtnet_poll_tx()
3275 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll_tx()
3276 u64_stats_inc(&sq->stats.wake); in virtnet_poll_tx()
3277 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll_tx()
3286 opaque = virtqueue_enable_cb_prepare(sq->vq); in virtnet_poll_tx()
3291 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3296 if (unlikely(virtqueue_poll(sq->vq, opaque))) { in virtnet_poll_tx()
3299 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3311 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; in xmit_skb()
3312 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb()
3315 unsigned hdr_len = vi->hdr_len; in xmit_skb()
3318 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
3321 BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr)); in xmit_skb()
3322 BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr.hdr)); in xmit_skb()
3324 can_push = vi->any_header_sg && in xmit_skb()
3325 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && in xmit_skb()
3330 hdr = (struct virtio_net_hdr_v1_hash_tunnel *)(skb->data - in xmit_skb()
3333 hdr = &skb_vnet_common_hdr(skb)->tnl_hdr; in xmit_skb()
3335 if (virtio_net_hdr_tnl_from_skb(skb, hdr, vi->tx_tnl, in xmit_skb()
3336 virtio_is_little_endian(vi->vdev), 0)) in xmit_skb()
3337 return -EPROTO; in xmit_skb()
3339 if (vi->mergeable_rx_bufs) in xmit_skb()
3340 hdr->hash_hdr.hdr.num_buffers = 0; in xmit_skb()
3342 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); in xmit_skb()
3345 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); in xmit_skb()
3348 /* Pull header back to avoid skew in tx bytes calculations. */ in xmit_skb()
3351 sg_set_buf(sq->sg, hdr, hdr_len); in xmit_skb()
3352 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); in xmit_skb()
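
xmit_skb() above builds the transmit scatterlist in one of two shapes: when can_push holds, the virtio header is written into the skb's own headroom and a single skb_to_sgvec() covers header plus data (nr_frags + 1 entries); otherwise the header lives in the per-skb cb area (skb_vnet_common_hdr()) and gets its own leading sg entry (nr_frags + 2 entries). The excerpt shows the any_header_sg and alignment conditions; the headroom and cloned-header checks below are assumptions. A self-contained sketch of the decision:

#include <stdbool.h>
#include <stdio.h>

/* Decide whether the virtio-net header can be prepended inside the skb's
 * own headroom (one scatterlist run) or must occupy its own leading entry. */
static bool can_push_hdr(bool any_header_sg, unsigned long skb_data_addr,
			 unsigned int hdr_align, unsigned int hdr_len,
			 unsigned int headroom, bool header_cloned)
{
	return any_header_sg &&
	       !(skb_data_addr & (hdr_align - 1)) &&  /* header stays aligned      */
	       !header_cloned &&                      /* safe to write the headroom (assumed check) */
	       headroom >= hdr_len;                   /* room for the header (assumed check) */
}

int main(void)
{
	/* Aligned data pointer with plenty of headroom: push the header. */
	printf("%d\n", can_push_hdr(true, 0x1000, 8, 12, 128, false)); /* 1 */
	/* Misaligned data pointer: fall back to a separate header sg.   */
	printf("%d\n", can_push_hdr(true, 0x1003, 8, 12, 128, false)); /* 0 */
	return 0;
}
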
3366 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
3370 bool use_napi = sq->napi.weight; in start_xmit()
3376 virtqueue_disable_cb(sq->vq); in start_xmit()
3388 dev_warn(&dev->dev, in start_xmit()
3407 kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) : in start_xmit()
3410 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in start_xmit()
3411 u64_stats_update_begin(&sq->stats.syncp); in start_xmit()
3412 u64_stats_inc(&sq->stats.kicks); in start_xmit()
3413 u64_stats_update_end(&sq->stats.syncp); in start_xmit()
3417 if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq))) in start_xmit()
3418 virtqueue_napi_schedule(&sq->napi, sq->vq); in start_xmit()
3426 bool running = netif_running(vi->dev); in __virtnet_rx_pause()
3430 virtnet_cancel_dim(vi, &rq->dim); in __virtnet_rx_pause()
3443 cancel_delayed_work_sync(&vi->refill); in virtnet_rx_pause_all()
3444 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_rx_pause_all()
3445 __virtnet_rx_pause(vi, &vi->rq[i]); in virtnet_rx_pause_all()
3455 cancel_delayed_work_sync(&vi->refill); in virtnet_rx_pause()
3463 bool running = netif_running(vi->dev); in __virtnet_rx_resume()
3472 schedule_delayed_work(&vi->refill, 0); in __virtnet_rx_resume()
3480 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_rx_resume_all()
3481 if (i < vi->curr_queue_pairs) in virtnet_rx_resume_all()
3482 __virtnet_rx_resume(vi, &vi->rq[i], true); in virtnet_rx_resume_all()
3484 __virtnet_rx_resume(vi, &vi->rq[i], false); in virtnet_rx_resume_all()
3499 qindex = rq - vi->rq; in virtnet_rx_resize()
3503 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL); in virtnet_rx_resize()
3505 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rx_resize()
3513 bool running = netif_running(vi->dev); in virtnet_tx_pause()
3517 qindex = sq - vi->sq; in virtnet_tx_pause()
3522 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_pause()
3530 sq->reset = true; in virtnet_tx_pause()
3533 netif_stop_subqueue(vi->dev, qindex); in virtnet_tx_pause()
3540 bool running = netif_running(vi->dev); in virtnet_tx_resume()
3544 qindex = sq - vi->sq; in virtnet_tx_resume()
3546 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_resume()
3549 sq->reset = false; in virtnet_tx_resume()
3563 netdev_err(vi->dev, "tx size (%d) cannot be smaller than %d\n", in virtnet_tx_resize()
3565 return -EINVAL; in virtnet_tx_resize()
3568 qindex = sq - vi->sq; in virtnet_tx_resize()
3572 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf, in virtnet_tx_resize()
3575 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_tx_resize()
3597 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command_reply()
3599 mutex_lock(&vi->cvq_lock); in virtnet_send_command_reply()
3600 vi->ctrl->status = ~0; in virtnet_send_command_reply()
3601 vi->ctrl->hdr.class = class; in virtnet_send_command_reply()
3602 vi->ctrl->hdr.cmd = cmd; in virtnet_send_command_reply()
3604 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); in virtnet_send_command_reply()
3611 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); in virtnet_send_command_reply()
3618 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC); in virtnet_send_command_reply()
3620 dev_warn(&vi->vdev->dev, in virtnet_send_command_reply()
3622 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3626 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command_reply()
3632 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command_reply()
3633 !virtqueue_is_broken(vi->cvq)) { in virtnet_send_command_reply()
3639 ok = vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command_reply()
3640 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3653 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
3658 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_set_mac_address()
3659 return -EOPNOTSUPP; in virtnet_set_mac_address()
3663 return -ENOMEM; in virtnet_set_mac_address()
3670 sg_init_one(&sg, addr->sa_data, dev->addr_len); in virtnet_set_mac_address()
3673 dev_warn(&vdev->dev, in virtnet_set_mac_address()
3675 ret = -EINVAL; in virtnet_set_mac_address()
3683 for (i = 0; i < dev->addr_len; i++) in virtnet_set_mac_address()
3686 i, addr->sa_data[i]); in virtnet_set_mac_address()
3704 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_stats()
3706 struct receive_queue *rq = &vi->rq[i]; in virtnet_stats()
3707 struct send_queue *sq = &vi->sq[i]; in virtnet_stats()
3710 start = u64_stats_fetch_begin(&sq->stats.syncp); in virtnet_stats()
3711 tpackets = u64_stats_read(&sq->stats.packets); in virtnet_stats()
3712 tbytes = u64_stats_read(&sq->stats.bytes); in virtnet_stats()
3713 terrors = u64_stats_read(&sq->stats.tx_timeouts); in virtnet_stats()
3714 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); in virtnet_stats()
3717 start = u64_stats_fetch_begin(&rq->stats.syncp); in virtnet_stats()
3718 rpackets = u64_stats_read(&rq->stats.packets); in virtnet_stats()
3719 rbytes = u64_stats_read(&rq->stats.bytes); in virtnet_stats()
3720 rdrops = u64_stats_read(&rq->stats.drops); in virtnet_stats()
3721 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); in virtnet_stats()
3723 tot->rx_packets += rpackets; in virtnet_stats()
3724 tot->tx_packets += tpackets; in virtnet_stats()
3725 tot->rx_bytes += rbytes; in virtnet_stats()
3726 tot->tx_bytes += tbytes; in virtnet_stats()
3727 tot->rx_dropped += rdrops; in virtnet_stats()
3728 tot->tx_errors += terrors; in virtnet_stats()
3731 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped); in virtnet_stats()
3732 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors); in virtnet_stats()
3733 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors); in virtnet_stats()
3734 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors); in virtnet_stats()
3741 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
3751 for (; i < vi->rss_indir_table_size; ++i) { in virtnet_rss_update_by_qpairs()
3753 vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val); in virtnet_rss_update_by_qpairs()
3755 vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs); in virtnet_rss_update_by_qpairs()
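
virtnet_rss_update_by_qpairs() above rewrites the RSS indirection table whenever the number of active queue pairs changes and records the new pair count in max_tx_vq. The per-entry value is not visible in the excerpt; the conventional default is a round-robin spread (entry i maps to queue i % queue_pairs), which the sketch below assumes:

#include <stdio.h>
#include <stdint.h>

/* Model: fill an RSS indirection table by spreading entries round-robin
 * across the active receive queues (assumed default policy). */
static void rss_update_by_qpairs(uint16_t *indir, unsigned int table_size,
				 uint16_t queue_pairs, uint16_t *max_tx_vq)
{
	for (unsigned int i = 0; i < table_size; i++)
		indir[i] = i % queue_pairs;   /* host byte order in this model */
	*max_tx_vq = queue_pairs;
}

int main(void)
{
	uint16_t indir[8], max_tx_vq;

	rss_update_by_qpairs(indir, 8, 3, &max_tx_vq);
	for (int i = 0; i < 8; i++)
		printf("%u ", indir[i]);      /* 0 1 2 0 1 2 0 1 */
	printf("\nmax_tx_vq=%u\n", max_tx_vq);
	return 0;
}
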
3763 struct net_device *dev = vi->dev; in virtnet_set_queues()
3766 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in virtnet_set_queues()
3776 if (vi->has_rss && !netif_is_rxfh_configured(dev)) { in virtnet_set_queues()
3777 old_rss_hdr = vi->rss_hdr; in virtnet_set_queues()
3778 old_rss_trailer = vi->rss_trailer; in virtnet_set_queues()
3779 vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); in virtnet_set_queues()
3780 if (!vi->rss_hdr) { in virtnet_set_queues()
3781 vi->rss_hdr = old_rss_hdr; in virtnet_set_queues()
3782 return -ENOMEM; in virtnet_set_queues()
3785 *vi->rss_hdr = *old_rss_hdr; in virtnet_set_queues()
3790 devm_kfree(&dev->dev, vi->rss_hdr); in virtnet_set_queues()
3791 vi->rss_hdr = old_rss_hdr; in virtnet_set_queues()
3792 vi->rss_trailer = old_rss_trailer; in virtnet_set_queues()
3794 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n", in virtnet_set_queues()
3796 return -EINVAL; in virtnet_set_queues()
3798 devm_kfree(&dev->dev, old_rss_hdr); in virtnet_set_queues()
3804 return -ENOMEM; in virtnet_set_queues()
3806 mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in virtnet_set_queues()
3811 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", in virtnet_set_queues()
3813 return -EINVAL; in virtnet_set_queues()
3816 vi->curr_queue_pairs = queue_pairs; in virtnet_set_queues()
3818 spin_lock_bh(&vi->refill_lock); in virtnet_set_queues()
3819 if (dev->flags & IFF_UP && vi->refill_enabled) in virtnet_set_queues()
3820 schedule_delayed_work(&vi->refill, 0); in virtnet_set_queues()
3821 spin_unlock_bh(&vi->refill_lock); in virtnet_set_queues()
3833 /* Make sure refill_work doesn't re-enable napi! */ in virtnet_close()
3834 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
3838 virtio_config_driver_disable(vi->vdev); in virtnet_close()
3842 cancel_work_sync(&vi->config_work); in virtnet_close()
3844 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_close()
3846 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_close()
3859 struct net_device *dev = vi->dev; in virtnet_rx_mode_work()
3869 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_rx_mode_work()
3874 dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n"); in virtnet_rx_mode_work()
3880 *promisc_allmulti = !!(dev->flags & IFF_PROMISC); in virtnet_rx_mode_work()
3885 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", in virtnet_rx_mode_work()
3888 *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI); in virtnet_rx_mode_work()
3893 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", in virtnet_rx_mode_work()
3900 /* MAC filter - use one buffer for both lists */ in virtnet_rx_mode_work()
3902 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); in virtnet_rx_mode_work()
3913 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_rx_mode_work()
3916 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); in virtnet_rx_mode_work()
3919 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); in virtnet_rx_mode_work()
3922 mac_data = (void *)&mac_data->macs[uc_count][0]; in virtnet_rx_mode_work()
3924 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_rx_mode_work()
3927 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); in virtnet_rx_mode_work()
3932 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); in virtnet_rx_mode_work()
3936 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); in virtnet_rx_mode_work()
3947 if (vi->rx_mode_work_enabled) in virtnet_set_rx_mode()
3948 schedule_work(&vi->rx_mode_work); in virtnet_set_rx_mode()
3960 return -ENOMEM; in virtnet_vlan_rx_add_vid()
3962 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_add_vid()
3967 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); in virtnet_vlan_rx_add_vid()
3980 return -ENOMEM; in virtnet_vlan_rx_kill_vid()
3982 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_kill_vid()
3987 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); in virtnet_vlan_rx_kill_vid()
3995 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
3996 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
3997 virtqueue_set_affinity(vi->rq[i].vq, NULL); in virtnet_clean_affinity()
3998 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
4001 vi->affinity_hint_set = false; in virtnet_clean_affinity()
4020 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); in virtnet_set_affinity()
4021 stragglers = num_cpu >= vi->curr_queue_pairs ? in virtnet_set_affinity()
4022 num_cpu % vi->curr_queue_pairs : in virtnet_set_affinity()
4025 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_set_affinity()
4029 if (!group_size--) { in virtnet_set_affinity()
4036 virtqueue_set_affinity(vi->rq[i].vq, mask); in virtnet_set_affinity()
4037 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
4038 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); in virtnet_set_affinity()
4042 vi->affinity_hint_set = true; in virtnet_set_affinity()
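
virtnet_set_affinity() above spreads the online CPUs across the active queue pairs: each pair gets "stride" CPUs (at least one), the first "stragglers" pairs absorb the remainder (an assumption about how the remainder is distributed), and the resulting mask is applied to both virtqueues of the pair and mirrored into XPS. A small model of that partitioning, with CPU ids taken as 0..num_cpu-1 for simplicity:

#include <stdio.h>

/* Print which CPUs each queue pair would be grouped with, following the
 * stride/stragglers arithmetic shown in the excerpt. */
static void model_set_affinity(int num_cpu, int curr_queue_pairs)
{
	int stride = num_cpu / curr_queue_pairs;
	int stragglers = num_cpu >= curr_queue_pairs ? num_cpu % curr_queue_pairs : 0;
	int cpu = 0;

	if (stride < 1)
		stride = 1;   /* fewer CPUs than queues: reuse CPUs */

	for (int i = 0; i < curr_queue_pairs; i++) {
		int group_size = stride + (i < stragglers ? 1 : 0);

		printf("queue pair %d -> cpus", i);
		for (int j = 0; j < group_size; j++, cpu = (cpu + 1) % num_cpu)
			printf(" %d", cpu);
		printf("\n");
	}
}

int main(void)
{
	model_set_affinity(6, 4);   /* stride 1, 2 stragglers: groups of 2,2,1,1 */
	return 0;
}
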
4077 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
4081 &vi->node_dead); in virtnet_cpu_notif_add()
4084 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
4090 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_remove()
4092 &vi->node_dead); in virtnet_cpu_notif_remove()
4103 return -ENOMEM; in virtnet_send_ctrl_coal_vq_cmd()
4105 coal_vq->vqn = cpu_to_le16(vqn); in virtnet_send_ctrl_coal_vq_cmd()
4106 coal_vq->coal.max_usecs = cpu_to_le32(max_usecs); in virtnet_send_ctrl_coal_vq_cmd()
4107 coal_vq->coal.max_packets = cpu_to_le32(max_packets); in virtnet_send_ctrl_coal_vq_cmd()
4113 return -EINVAL; in virtnet_send_ctrl_coal_vq_cmd()
4124 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_ctrl_coal_vq_cmd()
4125 return -EOPNOTSUPP; in virtnet_send_rx_ctrl_coal_vq_cmd()
4132 vi->rq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_rx_ctrl_coal_vq_cmd()
4133 vi->rq[queue].intr_coal.max_packets = max_packets; in virtnet_send_rx_ctrl_coal_vq_cmd()
4144 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_tx_ctrl_coal_vq_cmd()
4145 return -EOPNOTSUPP; in virtnet_send_tx_ctrl_coal_vq_cmd()
4152 vi->sq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_tx_ctrl_coal_vq_cmd()
4153 vi->sq[queue].intr_coal.max_packets = max_packets; in virtnet_send_tx_ctrl_coal_vq_cmd()
4165 ring->rx_max_pending = vi->rq[0].vq->num_max; in virtnet_get_ringparam()
4166 ring->tx_max_pending = vi->sq[0].vq->num_max; in virtnet_get_ringparam()
4167 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
4168 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
4182 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in virtnet_set_ringparam()
4183 return -EINVAL; in virtnet_set_ringparam()
4185 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_set_ringparam()
4186 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_set_ringparam()
4188 if (ring->rx_pending == rx_pending && in virtnet_set_ringparam()
4189 ring->tx_pending == tx_pending) in virtnet_set_ringparam()
4192 if (ring->rx_pending > vi->rq[0].vq->num_max) in virtnet_set_ringparam()
4193 return -EINVAL; in virtnet_set_ringparam()
4195 if (ring->tx_pending > vi->sq[0].vq->num_max) in virtnet_set_ringparam()
4196 return -EINVAL; in virtnet_set_ringparam()
4198 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_set_ringparam()
4199 rq = vi->rq + i; in virtnet_set_ringparam()
4200 sq = vi->sq + i; in virtnet_set_ringparam()
4202 if (ring->tx_pending != tx_pending) { in virtnet_set_ringparam()
4203 err = virtnet_tx_resize(vi, sq, ring->tx_pending); in virtnet_set_ringparam()
4207 /* Upon disabling and re-enabling a transmit virtqueue, the device must in virtnet_set_ringparam()
4210 * did not set any TX coalescing parameters, to 0. in virtnet_set_ringparam()
4213 vi->intr_coal_tx.max_usecs, in virtnet_set_ringparam()
4214 vi->intr_coal_tx.max_packets); in virtnet_set_ringparam()
4216 /* Don't break the tx resize action if the vq coalescing is not in virtnet_set_ringparam()
4219 if (err && err != -EOPNOTSUPP) in virtnet_set_ringparam()
4223 if (ring->rx_pending != rx_pending) { in virtnet_set_ringparam()
4224 err = virtnet_rx_resize(vi, rq, ring->rx_pending); in virtnet_set_ringparam()
4229 mutex_lock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4231 vi->intr_coal_rx.max_usecs, in virtnet_set_ringparam()
4232 vi->intr_coal_rx.max_packets); in virtnet_set_ringparam()
4233 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4234 if (err && err != -EOPNOTSUPP) in virtnet_set_ringparam()
4244 struct net_device *dev = vi->dev; in virtnet_commit_rss_command()
4249 sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi)); in virtnet_commit_rss_command()
4250 sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi)); in virtnet_commit_rss_command()
4253 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG in virtnet_commit_rss_command()
4260 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); in virtnet_commit_rss_command()
4267 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported); in virtnet_init_default_rss()
4268 vi->rss_hash_types_saved = vi->rss_hash_types_supported; in virtnet_init_default_rss()
4269 vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size in virtnet_init_default_rss()
4270 ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0; in virtnet_init_default_rss()
4271 vi->rss_hdr->unclassified_queue = 0; in virtnet_init_default_rss()
4273 virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); in virtnet_init_default_rss()
4275 vi->rss_trailer.hash_key_length = vi->rss_key_size; in virtnet_init_default_rss()
4277 netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size); in virtnet_init_default_rss()
4285 info->data = 0; in virtnet_get_hashflow()
4286 switch (info->flow_type) { in virtnet_get_hashflow()
4288 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { in virtnet_get_hashflow()
4289 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4291 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4292 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4296 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { in virtnet_get_hashflow()
4297 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4299 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4300 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4304 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { in virtnet_get_hashflow()
4305 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4307 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4308 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4312 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { in virtnet_get_hashflow()
4313 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4315 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4316 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4320 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) in virtnet_get_hashflow()
4321 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4325 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) in virtnet_get_hashflow()
4326 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4330 info->data = 0; in virtnet_get_hashflow()
4342 u32 new_hashtypes = vi->rss_hash_types_saved; in virtnet_set_hashflow()
4343 bool is_disable = info->data & RXH_DISCARD; in virtnet_set_hashflow()
4344 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); in virtnet_set_hashflow()
4347 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) in virtnet_set_hashflow()
4348 return -EINVAL; in virtnet_set_hashflow()
4350 switch (info->flow_type) { in virtnet_set_hashflow()
4387 return -EINVAL; in virtnet_set_hashflow()
4391 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) in virtnet_set_hashflow()
4392 return -EINVAL; in virtnet_set_hashflow()
4394 if (new_hashtypes != vi->rss_hash_types_saved) { in virtnet_set_hashflow()
4395 vi->rss_hash_types_saved = new_hashtypes; in virtnet_set_hashflow()
4396 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); in virtnet_set_hashflow()
4397 if (vi->dev->features & NETIF_F_RXHASH) in virtnet_set_hashflow()
4399 return -EINVAL; in virtnet_set_hashflow()
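/* Ethtool's RXH_* flow-hash flags are translated into the device's
 * VIRTIO_NET_RSS_HASH_TYPE_* bits; only "L3 only" (IP src/dst),
 * "L3 plus L4 ports", or disabling hashing for a flow type is accepted,
 * and the result must stay within rss_hash_types_supported.
 */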
4409 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
4411 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); in virtnet_get_drvinfo()
4412 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); in virtnet_get_drvinfo()
4413 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); in virtnet_get_drvinfo()
4422 u16 queue_pairs = channels->combined_count; in virtnet_set_channels()
4425 /* We don't support separate rx/tx channels. in virtnet_set_channels()
4428 if (channels->rx_count || channels->tx_count || channels->other_count) in virtnet_set_channels()
4429 return -EINVAL; in virtnet_set_channels()
4431 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
4432 return -EINVAL; in virtnet_set_channels()
4438 if (vi->rq[0].xdp_prog) in virtnet_set_channels()
4439 return -EINVAL; in virtnet_set_channels()
4470 /* qid == -1: for rx/tx queue total field */
4481 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_get_stats_string()
4485 virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc); in virtnet_get_stats_string()
4501 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_stats_string()
4508 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_stats_string()
4515 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_get_stats_string()
4524 fmt = "tx%u_%s"; in virtnet_get_stats_string()
4532 fmt = "tx%u_hw_%s"; in virtnet_get_stats_string()
4535 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_stats_string()
4542 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_stats_string()
4549 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_get_stats_string()
4561 /* The stats are written to qstats or ethtool -S */
4583 ctx->data = data; in virtnet_stats_ctx_init()
4584 ctx->to_qstat = to_qstat; in virtnet_stats_ctx_init()
4587 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); in virtnet_stats_ctx_init()
4588 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); in virtnet_stats_ctx_init()
4592 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4593 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; in virtnet_stats_ctx_init()
4594 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); in virtnet_stats_ctx_init()
4595 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); in virtnet_stats_ctx_init()
4598 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4599 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; in virtnet_stats_ctx_init()
4600 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); in virtnet_stats_ctx_init()
4601 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); in virtnet_stats_ctx_init()
4604 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_stats_ctx_init()
4605 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO; in virtnet_stats_ctx_init()
4606 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); in virtnet_stats_ctx_init()
4607 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso); in virtnet_stats_ctx_init()
4610 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4611 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; in virtnet_stats_ctx_init()
4612 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); in virtnet_stats_ctx_init()
4613 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); in virtnet_stats_ctx_init()
4618 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4619 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; in virtnet_stats_ctx_init()
4620 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); in virtnet_stats_ctx_init()
4621 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); in virtnet_stats_ctx_init()
4624 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_stats_ctx_init()
4625 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM; in virtnet_stats_ctx_init()
4626 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); in virtnet_stats_ctx_init()
4627 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum); in virtnet_stats_ctx_init()
4630 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4631 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; in virtnet_stats_ctx_init()
4632 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); in virtnet_stats_ctx_init()
4633 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); in virtnet_stats_ctx_init()
4636 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4637 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; in virtnet_stats_ctx_init()
4638 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); in virtnet_stats_ctx_init()
4639 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); in virtnet_stats_ctx_init()
4645 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc); in virtnet_stats_ctx_init()
4646 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc); in virtnet_stats_ctx_init()
4648 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_stats_ctx_init()
4651 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ; in virtnet_stats_ctx_init()
4652 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc); in virtnet_stats_ctx_init()
4653 ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq); in virtnet_stats_ctx_init()
4658 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4659 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; in virtnet_stats_ctx_init()
4660 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc); in virtnet_stats_ctx_init()
4661 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); in virtnet_stats_ctx_init()
4664 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4665 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; in virtnet_stats_ctx_init()
4666 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc); in virtnet_stats_ctx_init()
4667 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); in virtnet_stats_ctx_init()
4670 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4671 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; in virtnet_stats_ctx_init()
4672 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc); in virtnet_stats_ctx_init()
4673 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); in virtnet_stats_ctx_init()
4678 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4679 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; in virtnet_stats_ctx_init()
4680 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc); in virtnet_stats_ctx_init()
4681 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); in virtnet_stats_ctx_init()
4684 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4685 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; in virtnet_stats_ctx_init()
4686 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc); in virtnet_stats_ctx_init()
4687 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); in virtnet_stats_ctx_init()
4690 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4691 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; in virtnet_stats_ctx_init()
4692 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc); in virtnet_stats_ctx_init()
4693 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); in virtnet_stats_ctx_init()
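/* Roughly: ctx->bitmap[] records which device stat types will be requested
 * per queue type, while ctx->desc_num[] and ctx->size[] accumulate how many
 * output fields and how many reply bytes each RX/TX/CQ entry contributes,
 * on top of the driver's own software counters.
 */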
4697 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
4724 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; in virtnet_fill_total_fields()
4725 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; in virtnet_fill_total_fields()
4726 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; in virtnet_fill_total_fields()
4728 first_rx_q = ctx->data + num_rx + num_tx + num_cq; in virtnet_fill_total_fields()
4729 first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx; in virtnet_fill_total_fields()
4731 data = ctx->data; in virtnet_fill_total_fields()
4733 stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
4735 data = ctx->data + num_rx; in virtnet_fill_total_fields()
4737 stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
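/* The ethtool -S buffer is laid out as: RX totals, TX totals, CVQ stats,
 * then the per-queue RX blocks followed by the per-queue TX blocks; the
 * totals filled here are just the sums of the matching per-queue fields.
 */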
4752 bitmap = ctx->bitmap[queue_type]; in virtnet_fill_stats_qstat()
4764 offset = desc[i].qstat_offset / sizeof(*ctx->data); in virtnet_fill_stats_qstat()
4766 ctx->data[offset] = u64_stats_read(v_stat); in virtnet_fill_stats_qstat()
4831 offset = desc[i].qstat_offset / sizeof(*ctx->data); in virtnet_fill_stats_qstat()
4833 ctx->data[offset] = le64_to_cpu(*v); in virtnet_fill_stats_qstat()
4837 /* virtnet_fill_stats - copy the stats to qstats or ethtool -S
4858 if (ctx->to_qstat) in virtnet_fill_stats()
4861 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; in virtnet_fill_stats()
4862 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; in virtnet_fill_stats()
4863 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; in virtnet_fill_stats()
4866 bitmap = ctx->bitmap[queue_type]; in virtnet_fill_stats()
4872 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2); in virtnet_fill_stats()
4962 ctx->data[offset + i] = le64_to_cpu(*v); in virtnet_fill_stats()
4970 ctx->data[offset + i] = u64_stats_read(v_stat); in virtnet_fill_stats()
4995 for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) { in __virtnet_get_hw_stats()
4997 qid = le16_to_cpu(hdr->vq_index); in __virtnet_get_hw_stats()
4998 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type); in __virtnet_get_hw_stats()
5010 u64 bitmap = ctx->bitmap[qtype]; in virtnet_make_stat_req()
5015 req->stats[*idx].vq_index = cpu_to_le16(qid); in virtnet_make_stat_req()
5016 req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap); in virtnet_make_stat_req()
5020 /* qid: -1: get stats of all vqs.
5032 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) in virtnet_get_hw_stats()
5035 if (qid == -1) { in virtnet_get_hw_stats()
5036 last_vq = vi->curr_queue_pairs * 2 - 1; in virtnet_get_hw_stats()
5049 if (ctx->bitmap[qtype]) { in virtnet_get_hw_stats()
5051 res_size += ctx->size[qtype]; in virtnet_get_hw_stats()
5055 if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) { in virtnet_get_hw_stats()
5056 res_size += ctx->size[VIRTNET_Q_TYPE_CQ]; in virtnet_get_hw_stats()
5062 return -ENOMEM; in virtnet_get_hw_stats()
5067 return -ENOMEM; in virtnet_get_hw_stats()
5075 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j); in virtnet_get_hw_stats()
5094 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p); in virtnet_get_strings()
5095 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p); in virtnet_get_strings()
5099 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
5102 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
5121 vi->curr_queue_pairs * pair_count; in virtnet_get_sset_count()
5123 return -EOPNOTSUPP; in virtnet_get_sset_count()
5136 if (virtnet_get_hw_stats(vi, &ctx, -1)) in virtnet_get_ethtool_stats()
5137 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n"); in virtnet_get_ethtool_stats()
5139 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
5140 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_ethtool_stats()
5141 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats()
5143 stats_base = (const u8 *)&rq->stats; in virtnet_get_ethtool_stats()
5145 start = u64_stats_fetch_begin(&rq->stats.syncp); in virtnet_get_ethtool_stats()
5147 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); in virtnet_get_ethtool_stats()
5149 stats_base = (const u8 *)&sq->stats; in virtnet_get_ethtool_stats()
5151 start = u64_stats_fetch_begin(&sq->stats.syncp); in virtnet_get_ethtool_stats()
5153 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); in virtnet_get_ethtool_stats()
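/* Per-queue counters are read inside the u64_stats_fetch_begin() /
 * u64_stats_fetch_retry() loop so a consistent snapshot is taken even
 * while the datapath keeps updating rq->stats and sq->stats.
 */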
5164 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
5165 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
5166 channels->max_other = 0; in virtnet_get_channels()
5167 channels->rx_count = 0; in virtnet_get_channels()
5168 channels->tx_count = 0; in virtnet_get_channels()
5169 channels->other_count = 0; in virtnet_get_channels()
5178 &vi->speed, &vi->duplex); in virtnet_set_link_ksettings()
5186 cmd->base.speed = vi->speed; in virtnet_get_link_ksettings()
5187 cmd->base.duplex = vi->duplex; in virtnet_get_link_ksettings()
5188 cmd->base.port = PORT_OTHER; in virtnet_get_link_ksettings()
5202 return -ENOMEM; in virtnet_send_tx_notf_coal_cmds()
5204 coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); in virtnet_send_tx_notf_coal_cmds()
5205 coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); in virtnet_send_tx_notf_coal_cmds()
5211 return -EINVAL; in virtnet_send_tx_notf_coal_cmds()
5213 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5214 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5215 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_tx_notf_coal_cmds()
5216 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5217 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5227 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; in virtnet_send_rx_notf_coal_cmds()
5231 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_notf_coal_cmds()
5232 return -EOPNOTSUPP; in virtnet_send_rx_notf_coal_cmds()
5234 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || in virtnet_send_rx_notf_coal_cmds()
5235 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) in virtnet_send_rx_notf_coal_cmds()
5236 return -EINVAL; in virtnet_send_rx_notf_coal_cmds()
5238 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5239 vi->rx_dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5240 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5241 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5242 vi->rq[i].dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5243 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5250 return -ENOMEM; in virtnet_send_rx_notf_coal_cmds()
5252 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5253 vi->rx_dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5254 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5255 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5256 vi->rq[i].dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5257 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5261 /* Since the per-queue coalescing params can be set, in virtnet_send_rx_notf_coal_cmds()
5265 coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); in virtnet_send_rx_notf_coal_cmds()
5266 coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_cmds()
5272 return -EINVAL; in virtnet_send_rx_notf_coal_cmds()
5274 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5275 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5276 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5277 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5278 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5279 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5280 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
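/* Adaptive RX coalescing (DIM) and fixed parameters are mutually exclusive
 * here: turning DIM on while also changing the rx usecs/frames values is
 * rejected, and turning it off pushes the requested values to the device
 * and mirrors them into every queue's intr_coal state under dim_lock.
 */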
5306 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; in virtnet_send_rx_notf_coal_vq_cmds()
5311 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5312 cur_rx_dim = vi->rq[queue].dim_enabled; in virtnet_send_rx_notf_coal_vq_cmds()
5313 max_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_send_rx_notf_coal_vq_cmds()
5314 max_packets = vi->rq[queue].intr_coal.max_packets; in virtnet_send_rx_notf_coal_vq_cmds()
5316 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || in virtnet_send_rx_notf_coal_vq_cmds()
5317 ec->rx_max_coalesced_frames != max_packets)) { in virtnet_send_rx_notf_coal_vq_cmds()
5318 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5319 return -EINVAL; in virtnet_send_rx_notf_coal_vq_cmds()
5323 vi->rq[queue].dim_enabled = true; in virtnet_send_rx_notf_coal_vq_cmds()
5324 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5329 vi->rq[queue].dim_enabled = false; in virtnet_send_rx_notf_coal_vq_cmds()
5335 ec->rx_coalesce_usecs, in virtnet_send_rx_notf_coal_vq_cmds()
5336 ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_vq_cmds()
5337 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5352 ec->tx_coalesce_usecs, in virtnet_send_notf_coal_vq_cmds()
5353 ec->tx_max_coalesced_frames); in virtnet_send_notf_coal_vq_cmds()
5365 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rx_dim_work()
5366 struct net_device *dev = vi->dev; in virtnet_rx_dim_work()
5370 qnum = rq - vi->rq; in virtnet_rx_dim_work()
5372 mutex_lock(&rq->dim_lock); in virtnet_rx_dim_work()
5373 if (!rq->dim_enabled) in virtnet_rx_dim_work()
5377 if (update_moder.usec != rq->intr_coal.max_usecs || in virtnet_rx_dim_work()
5378 update_moder.pkts != rq->intr_coal.max_packets) { in virtnet_rx_dim_work()
5384 dev->name, qnum); in virtnet_rx_dim_work()
5387 dim->state = DIM_START_MEASURE; in virtnet_rx_dim_work()
5388 mutex_unlock(&rq->dim_lock); in virtnet_rx_dim_work()
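/* The DIM worker converts the suggested moderation profile into
 * usecs/packets, issues a per-queue coalescing command only when the values
 * actually change, and then resets DIM to start the next measurement.
 */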
5396 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) in virtnet_coal_params_supported()
5397 return -EOPNOTSUPP; in virtnet_coal_params_supported()
5399 if (ec->tx_max_coalesced_frames > 1 || in virtnet_coal_params_supported()
5400 ec->rx_max_coalesced_frames != 1) in virtnet_coal_params_supported()
5401 return -EINVAL; in virtnet_coal_params_supported()
5411 return -EBUSY; in virtnet_should_update_vq_weight()
5428 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; in virtnet_set_coalesce()
5429 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { in virtnet_set_coalesce()
5430 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, in virtnet_set_coalesce()
5431 vi->sq[queue_number].napi.weight, in virtnet_set_coalesce()
5437 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be in virtnet_set_coalesce()
5444 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) in virtnet_set_coalesce()
5453 /* xsk xmit depends on the tx napi. So if xsk is active, in virtnet_set_coalesce()
5454 * prevent modifications to tx napi. in virtnet_set_coalesce()
5456 for (i = queue_number; i < vi->max_queue_pairs; i++) { in virtnet_set_coalesce()
5457 if (vi->sq[i].xsk_pool) in virtnet_set_coalesce()
5458 return -EBUSY; in virtnet_set_coalesce()
5461 for (; queue_number < vi->max_queue_pairs; queue_number++) in virtnet_set_coalesce()
5462 vi->sq[queue_number].napi.weight = napi_weight; in virtnet_set_coalesce()
5475 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_get_coalesce()
5476 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; in virtnet_get_coalesce()
5477 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; in virtnet_get_coalesce()
5478 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; in virtnet_get_coalesce()
5479 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; in virtnet_get_coalesce()
5480 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; in virtnet_get_coalesce()
5482 ec->rx_max_coalesced_frames = 1; in virtnet_get_coalesce()
5484 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
5485 ec->tx_max_coalesced_frames = 1; in virtnet_get_coalesce()
5499 if (queue >= vi->max_queue_pairs) in virtnet_set_per_queue_coalesce()
5500 return -EINVAL; in virtnet_set_per_queue_coalesce()
5503 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; in virtnet_set_per_queue_coalesce()
5504 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, in virtnet_set_per_queue_coalesce()
5505 vi->sq[queue].napi.weight, in virtnet_set_per_queue_coalesce()
5510 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_set_per_queue_coalesce()
5519 vi->sq[queue].napi.weight = napi_weight; in virtnet_set_per_queue_coalesce()
5530 if (queue >= vi->max_queue_pairs) in virtnet_get_per_queue_coalesce()
5531 return -EINVAL; in virtnet_get_per_queue_coalesce()
5533 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_get_per_queue_coalesce()
5534 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5535 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5536 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5537 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5538 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5539 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; in virtnet_get_per_queue_coalesce()
5540 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5542 ec->rx_max_coalesced_frames = 1; in virtnet_get_per_queue_coalesce()
5544 if (vi->sq[queue].napi.weight) in virtnet_get_per_queue_coalesce()
5545 ec->tx_max_coalesced_frames = 1; in virtnet_get_per_queue_coalesce()
5555 vi->speed = SPEED_UNKNOWN; in virtnet_init_settings()
5556 vi->duplex = DUPLEX_UNKNOWN; in virtnet_init_settings()
5561 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; in virtnet_get_rxfh_key_size()
5566 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; in virtnet_get_rxfh_indir_size()
5575 if (rxfh->indir) { in virtnet_get_rxfh()
5576 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_get_rxfh()
5577 rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]); in virtnet_get_rxfh()
5580 if (rxfh->key) in virtnet_get_rxfh()
5581 memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size); in virtnet_get_rxfh()
5583 rxfh->hfunc = ETH_RSS_HASH_TOP; in virtnet_get_rxfh()
5596 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && in virtnet_set_rxfh()
5597 rxfh->hfunc != ETH_RSS_HASH_TOP) in virtnet_set_rxfh()
5598 return -EOPNOTSUPP; in virtnet_set_rxfh()
5600 if (rxfh->indir) { in virtnet_set_rxfh()
5601 if (!vi->has_rss) in virtnet_set_rxfh()
5602 return -EOPNOTSUPP; in virtnet_set_rxfh()
5604 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_set_rxfh()
5605 vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]); in virtnet_set_rxfh()
5609 if (rxfh->key) { in virtnet_set_rxfh()
5614 if (!vi->has_rss && !vi->has_rss_hash_report) in virtnet_set_rxfh()
5615 return -EOPNOTSUPP; in virtnet_set_rxfh()
5617 memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size); in virtnet_set_rxfh()
5631 return vi->curr_queue_pairs; in virtnet_get_rx_ring_count()
5666 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_queue_stats_rx()
5672 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0); in virtnet_get_queue_stats_rx()
5679 struct send_queue *sq = &vi->sq[i]; in virtnet_get_queue_stats_tx()
5685 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0); in virtnet_get_queue_stats_tx()
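/* The qstat fill path reuses the virtqueue-style index: 2 * i is treated as
 * the RX queue's id and 2 * i + 1 as the TX queue's id by
 * virtnet_fill_stats(), which selects the matching descriptor tables.
 */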
5690 struct netdev_queue_stats_tx *tx) in virtnet_get_base_stats() argument
5694 /* The queue stats of the virtio-net device are never reset. So here we in virtnet_get_base_stats()
5697 rx->bytes = 0; in virtnet_get_base_stats()
5698 rx->packets = 0; in virtnet_get_base_stats()
5700 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_base_stats()
5701 rx->hw_drops = 0; in virtnet_get_base_stats()
5702 rx->hw_drop_overruns = 0; in virtnet_get_base_stats()
5705 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_base_stats()
5706 rx->csum_unnecessary = 0; in virtnet_get_base_stats()
5707 rx->csum_none = 0; in virtnet_get_base_stats()
5708 rx->csum_bad = 0; in virtnet_get_base_stats()
5711 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_get_base_stats()
5712 rx->hw_gro_packets = 0; in virtnet_get_base_stats()
5713 rx->hw_gro_bytes = 0; in virtnet_get_base_stats()
5714 rx->hw_gro_wire_packets = 0; in virtnet_get_base_stats()
5715 rx->hw_gro_wire_bytes = 0; in virtnet_get_base_stats()
5718 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) in virtnet_get_base_stats()
5719 rx->hw_drop_ratelimits = 0; in virtnet_get_base_stats()
5721 tx->bytes = 0; in virtnet_get_base_stats()
5722 tx->packets = 0; in virtnet_get_base_stats()
5723 tx->stop = 0; in virtnet_get_base_stats()
5724 tx->wake = 0; in virtnet_get_base_stats()
5726 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_base_stats()
5727 tx->hw_drops = 0; in virtnet_get_base_stats()
5728 tx->hw_drop_errors = 0; in virtnet_get_base_stats()
5731 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_get_base_stats()
5732 tx->csum_none = 0; in virtnet_get_base_stats()
5733 tx->needs_csum = 0; in virtnet_get_base_stats()
5736 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_base_stats()
5737 tx->hw_gso_packets = 0; in virtnet_get_base_stats()
5738 tx->hw_gso_bytes = 0; in virtnet_get_base_stats()
5739 tx->hw_gso_wire_packets = 0; in virtnet_get_base_stats()
5740 tx->hw_gso_wire_bytes = 0; in virtnet_get_base_stats()
5743 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) in virtnet_get_base_stats()
5744 tx->hw_drop_ratelimits = 0; in virtnet_get_base_stats()
5747 dev->real_num_rx_queues, vi->max_queue_pairs, rx, in virtnet_get_base_stats()
5748 dev->real_num_tx_queues, vi->max_queue_pairs, tx); in virtnet_get_base_stats()
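/* Base counters are initialised only for the stat groups advertised in
 * device_stats_cap, mirroring the capability checks used when the
 * per-queue stats are filled in; unsupported fields are simply left unset.
 */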
5759 struct virtnet_info *vi = vdev->priv; in virtnet_freeze_down()
5762 flush_work(&vi->config_work); in virtnet_freeze_down()
5764 flush_work(&vi->rx_mode_work); in virtnet_freeze_down()
5766 if (netif_running(vi->dev)) { in virtnet_freeze_down()
5768 virtnet_close(vi->dev); in virtnet_freeze_down()
5772 netif_tx_lock_bh(vi->dev); in virtnet_freeze_down()
5773 netif_device_detach(vi->dev); in virtnet_freeze_down()
5774 netif_tx_unlock_bh(vi->dev); in virtnet_freeze_down()
5781 struct virtnet_info *vi = vdev->priv; in virtnet_restore_up()
5793 if (netif_running(vi->dev)) { in virtnet_restore_up()
5795 err = virtnet_open(vi->dev); in virtnet_restore_up()
5801 netif_tx_lock_bh(vi->dev); in virtnet_restore_up()
5802 netif_device_attach(vi->dev); in virtnet_restore_up()
5803 netif_tx_unlock_bh(vi->dev); in virtnet_restore_up()
5814 return -ENOMEM; in virtnet_set_guest_offloads()
5816 *_offloads = cpu_to_virtio64(vi->vdev, offloads); in virtnet_set_guest_offloads()
5822 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); in virtnet_set_guest_offloads()
5823 return -EINVAL; in virtnet_set_guest_offloads()
5833 if (!vi->guest_offloads) in virtnet_clear_guest_offloads()
5841 u64 offloads = vi->guest_offloads; in virtnet_restore_guest_offloads()
5843 if (!vi->guest_offloads) in virtnet_restore_guest_offloads()
5854 qindex = rq - vi->rq; in virtnet_rq_bind_xsk_pool()
5857 err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id); in virtnet_rq_bind_xsk_pool()
5861 err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info, in virtnet_rq_bind_xsk_pool()
5866 xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info); in virtnet_rq_bind_xsk_pool()
5871 err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL); in virtnet_rq_bind_xsk_pool()
5873 netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rq_bind_xsk_pool()
5878 rq->xsk_pool = pool; in virtnet_rq_bind_xsk_pool()
5886 xdp_rxq_info_unreg(&rq->xsk_rxq_info); in virtnet_rq_bind_xsk_pool()
5896 qindex = sq - vi->sq; in virtnet_sq_bind_xsk_pool()
5900 err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf, in virtnet_sq_bind_xsk_pool()
5903 netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_sq_bind_xsk_pool()
5907 sq->xsk_pool = pool; in virtnet_sq_bind_xsk_pool()
5925 if (vi->hdr_len > xsk_pool_get_headroom(pool)) in virtnet_xsk_pool_enable()
5926 return -EINVAL; in virtnet_xsk_pool_enable()
5931 if (vi->big_packets && !vi->mergeable_rx_bufs) in virtnet_xsk_pool_enable()
5932 return -ENOENT; in virtnet_xsk_pool_enable()
5934 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_enable()
5935 return -EINVAL; in virtnet_xsk_pool_enable()
5937 sq = &vi->sq[qid]; in virtnet_xsk_pool_enable()
5938 rq = &vi->rq[qid]; in virtnet_xsk_pool_enable()
5940 /* xsk assumes that tx and rx must have the same dma device. The af-xdp in virtnet_xsk_pool_enable()
5942 * send via the tx. So the dma dev of sq and rq must be the same one. in virtnet_xsk_pool_enable()
5944 * But vq->dma_dev allows each vq to have its own dma dev. So we in virtnet_xsk_pool_enable()
5947 if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq)) in virtnet_xsk_pool_enable()
5948 return -EINVAL; in virtnet_xsk_pool_enable()
5950 dma_dev = virtqueue_dma_dev(rq->vq); in virtnet_xsk_pool_enable()
5952 return -EINVAL; in virtnet_xsk_pool_enable()
5954 size = virtqueue_get_vring_size(rq->vq); in virtnet_xsk_pool_enable()
5956 rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL); in virtnet_xsk_pool_enable()
5957 if (!rq->xsk_buffs) in virtnet_xsk_pool_enable()
5958 return -ENOMEM; in virtnet_xsk_pool_enable()
5960 hdr_dma = virtqueue_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len, in virtnet_xsk_pool_enable()
5962 if (virtqueue_map_mapping_error(sq->vq, hdr_dma)) { in virtnet_xsk_pool_enable()
5963 err = -ENOMEM; in virtnet_xsk_pool_enable()
5979 /* For now, we do not support tx offloads (such as tx csum), so every tx in virtnet_xsk_pool_enable()
5980 * virtnet hdr is all zeros. So all the tx packets can share a single hdr. in virtnet_xsk_pool_enable()
5982 sq->xsk_hdr_dma_addr = hdr_dma; in virtnet_xsk_pool_enable()
5991 virtqueue_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len, in virtnet_xsk_pool_enable()
5994 kvfree(rq->xsk_buffs); in virtnet_xsk_pool_enable()
6006 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_disable()
6007 return -EINVAL; in virtnet_xsk_pool_disable()
6009 sq = &vi->sq[qid]; in virtnet_xsk_pool_disable()
6010 rq = &vi->rq[qid]; in virtnet_xsk_pool_disable()
6012 pool = rq->xsk_pool; in virtnet_xsk_pool_disable()
6019 virtqueue_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr, in virtnet_xsk_pool_disable()
6020 vi->hdr_len, DMA_TO_DEVICE, 0); in virtnet_xsk_pool_disable()
6021 kvfree(rq->xsk_buffs); in virtnet_xsk_pool_disable()
6028 if (xdp->xsk.pool) in virtnet_xsk_pool_setup()
6029 return virtnet_xsk_pool_enable(dev, xdp->xsk.pool, in virtnet_xsk_pool_setup()
6030 xdp->xsk.queue_id); in virtnet_xsk_pool_setup()
6032 return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id); in virtnet_xsk_pool_setup()
6040 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; in virtnet_xdp_set()
6046 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) in virtnet_xdp_set()
6047 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_xdp_set()
6048 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_xdp_set()
6049 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_xdp_set()
6050 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_xdp_set()
6051 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || in virtnet_xdp_set()
6052 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || in virtnet_xdp_set()
6053 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { in virtnet_xdp_set()
6054 …MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); in virtnet_xdp_set()
6055 return -EOPNOTSUPP; in virtnet_xdp_set()
6058 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { in virtnet_xdp_set()
6060 return -EINVAL; in virtnet_xdp_set()
6063 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { in virtnet_xdp_set()
6065 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); in virtnet_xdp_set()
6066 return -EINVAL; in virtnet_xdp_set()
6069 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; in virtnet_xdp_set()
6074 if (curr_qp + xdp_qp > vi->max_queue_pairs) { in virtnet_xdp_set()
6075 …quest %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", in virtnet_xdp_set()
6076 curr_qp + xdp_qp, vi->max_queue_pairs); in virtnet_xdp_set()
6080 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); in virtnet_xdp_set()
6085 bpf_prog_add(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
6089 /* Make sure NAPI is not using any XDP TX queues for RX. */ in virtnet_xdp_set()
6091 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6092 virtnet_napi_tx_disable(&vi->sq[i]); in virtnet_xdp_set()
6096 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6097 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
6108 vi->xdp_queue_pairs = xdp_qp; in virtnet_xdp_set()
6111 vi->xdp_enabled = true; in virtnet_xdp_set()
6112 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6113 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
6121 vi->xdp_enabled = false; in virtnet_xdp_set()
6125 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6129 virtnet_napi_tx_enable(&vi->sq[i]); in virtnet_xdp_set()
6137 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6138 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); in virtnet_xdp_set()
6143 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6144 virtnet_napi_tx_enable(&vi->sq[i]); in virtnet_xdp_set()
6147 bpf_prog_sub(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
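/* Program swap ordering: TX NAPI is quiesced first, each rq[i].xdp_prog is
 * switched with rcu_assign_pointer(), and the old (or, on failure, the
 * extra) program references are dropped only once every receive queue has
 * been repointed.
 */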
6153 switch (xdp->command) { in virtnet_xdp()
6155 return virtnet_xdp_set(dev, xdp->prog, xdp->extack); in virtnet_xdp()
6159 return -EINVAL; in virtnet_xdp()
6169 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_get_phys_port_name()
6170 return -EOPNOTSUPP; in virtnet_get_phys_port_name()
6174 return -EOPNOTSUPP; in virtnet_get_phys_port_name()
6186 if ((dev->features ^ features) & NETIF_F_GRO_HW) { in virtnet_set_features()
6187 if (vi->xdp_enabled) in virtnet_set_features()
6188 return -EBUSY; in virtnet_set_features()
6191 offloads = vi->guest_offloads_capable; in virtnet_set_features()
6193 offloads = vi->guest_offloads_capable & in virtnet_set_features()
6199 vi->guest_offloads = offloads; in virtnet_set_features()
6202 if ((dev->features ^ features) & NETIF_F_RXHASH) { in virtnet_set_features()
6204 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); in virtnet_set_features()
6206 vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE); in virtnet_set_features()
6209 return -EINVAL; in virtnet_set_features()
6218 struct send_queue *sq = &priv->sq[txqueue]; in virtnet_tx_timeout()
6221 u64_stats_update_begin(&sq->stats.syncp); in virtnet_tx_timeout()
6222 u64_stats_inc(&sq->stats.tx_timeouts); in virtnet_tx_timeout()
6223 u64_stats_update_end(&sq->stats.syncp); in virtnet_tx_timeout()
6225 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", in virtnet_tx_timeout()
6226 txqueue, sq->name, sq->vq->index, sq->vq->name, in virtnet_tx_timeout()
6227 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); in virtnet_tx_timeout()
6237 ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags, in virtnet_init_irq_moder()
6244 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_init_irq_moder()
6245 net_dim_setting(vi->dev, &vi->rq[i].dim, false); in virtnet_init_irq_moder()
6252 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_free_irq_moder()
6256 net_dim_free_irq_moder(vi->dev); in virtnet_free_irq_moder()
6285 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
6290 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
6297 if (vi->status == v) in virtnet_config_changed_work()
6300 vi->status = v; in virtnet_config_changed_work()
6302 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
6304 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
6305 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
6307 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
6308 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
6314 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed()
6316 schedule_work(&vi->config_work); in virtnet_config_changed()
6323 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
6324 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
6325 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
6329 * we need to respect an RCU grace period before freeing vi->rq in virtnet_free_queues()
6333 kfree(vi->rq); in virtnet_free_queues()
6334 kfree(vi->sq); in virtnet_free_queues()
6335 kfree(vi->ctrl); in virtnet_free_queues()
6343 for (i = 0; i < vi->max_queue_pairs; i++) { in _free_receive_bufs()
6344 while (vi->rq[i].pages) in _free_receive_bufs()
6345 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in _free_receive_bufs()
6347 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); in _free_receive_bufs()
6348 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); in _free_receive_bufs()
6364 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
6365 if (vi->rq[i].alloc_frag.page) { in free_receive_page_frags()
6366 if (vi->rq[i].last_dma) in free_receive_page_frags()
6367 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); in free_receive_page_frags()
6368 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
6374 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf()
6378 sq = &vi->sq[i]; in virtnet_sq_free_unused_buf()
6391 xsk_tx_completed(sq->xsk_pool, 1); in virtnet_sq_free_unused_buf()
6398 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf_done()
6401 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in virtnet_sq_free_unused_buf_done()
6409 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6410 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
6416 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6417 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
6427 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
6431 vdev->config->del_vqs(vdev); in virtnet_del_vqs()
6442 const unsigned int hdr_len = vi->hdr_len; in mergeable_min_buf_len()
6444 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; in mergeable_min_buf_len()
6448 return max(max(min_buf_len, hdr_len) - hdr_len, in mergeable_min_buf_len()
6456 int ret = -ENOMEM; in virtnet_find_vqs()
6461 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by in virtnet_find_vqs()
6462 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by in virtnet_find_vqs()
6465 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
6466 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
6475 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_find_vqs()
6484 if (vi->has_cvq) { in virtnet_find_vqs()
6485 vqs_info[total_vqs - 1].name = "control"; in virtnet_find_vqs()
6489 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6492 sprintf(vi->rq[i].name, "input.%u", i); in virtnet_find_vqs()
6493 sprintf(vi->sq[i].name, "output.%u", i); in virtnet_find_vqs()
6494 vqs_info[rxq2vq(i)].name = vi->rq[i].name; in virtnet_find_vqs()
6495 vqs_info[txq2vq(i)].name = vi->sq[i].name; in virtnet_find_vqs()
6500 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL); in virtnet_find_vqs()
6504 if (vi->has_cvq) { in virtnet_find_vqs()
6505 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
6506 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
6507 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
6510 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6511 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
6512 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); in virtnet_find_vqs()
6513 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
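/* Resulting virtqueue layout: even indices carry the RX queues and odd
 * indices the matching TX queues (rxq2vq(i) == 2 * i,
 * txq2vq(i) == 2 * i + 1), with the optional control vq placed last.
 */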
6533 if (vi->has_cvq) { in virtnet_alloc_queues()
6534 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); in virtnet_alloc_queues()
6535 if (!vi->ctrl) in virtnet_alloc_queues()
6538 vi->ctrl = NULL; in virtnet_alloc_queues()
6540 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
6541 if (!vi->sq) in virtnet_alloc_queues()
6543 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); in virtnet_alloc_queues()
6544 if (!vi->rq) in virtnet_alloc_queues()
6547 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
6548 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
6549 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
6550 netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
6552 vi->rq[i].napi.weight = napi_weight; in virtnet_alloc_queues()
6553 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, in virtnet_alloc_queues()
6557 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
6558 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
6559 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
6561 u64_stats_init(&vi->rq[i].stats.syncp); in virtnet_alloc_queues()
6562 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
6563 mutex_init(&vi->rq[i].dim_lock); in virtnet_alloc_queues()
6569 kfree(vi->sq); in virtnet_alloc_queues()
6571 kfree(vi->ctrl); in virtnet_alloc_queues()
6573 return -ENOMEM; in virtnet_alloc_queues()
6605 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show()
6611 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
6612 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
6614 get_mergeable_buf_len(&vi->rq[queue_index], avg, in mergeable_rx_buffer_size_show()
6639 dev_err(&vdev->dev, "device advertises feature %s but not %s", in virtnet_fail_on_feature()
6679 if (!vdev->config->get) { in virtnet_validate()
6680 dev_err(&vdev->dev, "%s failure: config access disabled\n", in virtnet_validate()
6682 return -EINVAL; in virtnet_validate()
6686 return -EINVAL; in virtnet_validate()
6698 …dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, dis… in virtnet_validate()
6707 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_check_guest_gso()
6708 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_check_guest_gso()
6709 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_check_guest_gso()
6710 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_check_guest_gso()
6711 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && in virtnet_check_guest_gso()
6712 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); in virtnet_check_guest_gso()
6720 * allocate packets of maximum size, otherwise limit it to only in virtnet_set_big_packets()
6724 vi->big_packets = true; in virtnet_set_big_packets()
6725 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); in virtnet_set_big_packets()
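/* Worked example (assuming 4 KiB pages): with guest GSO the receive buffers
 * reserve MAX_SKB_FRAGS page fragments, otherwise an mtu of 9000 needs
 * DIV_ROUND_UP(9000, 4096) == 3 fragments.
 */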
6752 if (!(xdp->rxq->dev->features & NETIF_F_RXHASH)) in virtnet_xdp_rx_hash()
6753 return -ENODATA; in virtnet_xdp_rx_hash()
6755 vi = netdev_priv(xdp->rxq->dev); in virtnet_xdp_rx_hash()
6756 hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len); in virtnet_xdp_rx_hash()
6757 hash_report = __le16_to_cpu(hdr_hash->hash_report); in virtnet_xdp_rx_hash()
6773 int i, err = -ENOMEM; in virtnet_probe()
6794 return -ENOMEM; in virtnet_probe()
6797 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | in virtnet_probe()
6799 dev->netdev_ops = &virtnet_netdev; in virtnet_probe()
6800 dev->stat_ops = &virtnet_stat_ops; in virtnet_probe()
6801 dev->features = NETIF_F_HIGHDMA; in virtnet_probe()
6803 dev->ethtool_ops = &virtnet_ethtool_ops; in virtnet_probe()
6804 SET_NETDEV_DEV(dev, &vdev->dev); in virtnet_probe()
6809 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; in virtnet_probe()
6810 if (csum) in virtnet_probe()
6811 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; in virtnet_probe()
6814 dev->hw_features |= NETIF_F_TSO in virtnet_probe()
6819 dev->hw_features |= NETIF_F_TSO; in virtnet_probe()
6821 dev->hw_features |= NETIF_F_TSO6; in virtnet_probe()
6823 dev->hw_features |= NETIF_F_TSO_ECN; in virtnet_probe()
6825 dev->hw_features |= NETIF_F_GSO_UDP_L4; in virtnet_probe()
6828 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; in virtnet_probe()
6829 dev->hw_enc_features = dev->hw_features; in virtnet_probe()
6831 if (dev->hw_features & NETIF_F_GSO_UDP_TUNNEL && in virtnet_probe()
6833 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in virtnet_probe()
6834 dev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in virtnet_probe()
6837 dev->features |= NETIF_F_GSO_ROBUST; in virtnet_probe()
6840 dev->features |= dev->hw_features; in virtnet_probe()
6841 /* (!csum && gso) case will be fixed by register_netdev() */ in virtnet_probe()
6851 dev->features |= NETIF_F_RXCSUM; in virtnet_probe()
6855 dev->features |= NETIF_F_GRO_HW; in virtnet_probe()
6857 dev->hw_features |= NETIF_F_GRO_HW; in virtnet_probe()
6859 dev->vlan_features = dev->features; in virtnet_probe()
6860 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in virtnet_probe()
6863 /* MTU range: 68 - 65535 */ in virtnet_probe()
6864 dev->min_mtu = MIN_MTU; in virtnet_probe()
6865 dev->max_mtu = MAX_MTU; in virtnet_probe()
6877 dev_info(&vdev->dev, "Assigned random MAC address %pM\n", in virtnet_probe()
6878 dev->dev_addr); in virtnet_probe()
6881 /* Set up our device-specific information */ in virtnet_probe()
6883 vi->dev = dev; in virtnet_probe()
6884 vi->vdev = vdev; in virtnet_probe()
6885 vdev->priv = vi; in virtnet_probe()
6887 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
6888 INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); in virtnet_probe()
6889 spin_lock_init(&vi->refill_lock); in virtnet_probe()
6892 vi->mergeable_rx_bufs = true; in virtnet_probe()
6893 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG; in virtnet_probe()
6897 vi->has_rss_hash_report = true; in virtnet_probe()
6900 vi->has_rss = true; in virtnet_probe()
6902 vi->rss_indir_table_size = in virtnet_probe()
6906 vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); in virtnet_probe()
6907 if (!vi->rss_hdr) { in virtnet_probe()
6908 err = -ENOMEM; in virtnet_probe()
6912 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6913 vi->rss_key_size = in virtnet_probe()
6915 if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) { in virtnet_probe()
6916 dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n", in virtnet_probe()
6917 vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE); in virtnet_probe()
6918 err = -EINVAL; in virtnet_probe()
6922 vi->rss_hash_types_supported = in virtnet_probe()
6924 vi->rss_hash_types_supported &= in virtnet_probe()
6929 dev->hw_features |= NETIF_F_RXHASH; in virtnet_probe()
6930 dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops; in virtnet_probe()
6935 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash_tunnel); in virtnet_probe()
6936 else if (vi->has_rss_hash_report) in virtnet_probe()
6937 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); in virtnet_probe()
6940 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
6942 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
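/* The virtio header size is chosen by descending feature priority: the hash
 * tunnel header when UDP tunnel offloads are negotiated (see the
 * rx_tnl/tx_tnl flags below), otherwise the hash-report header, then the
 * mergeable/V1 header, and finally the legacy struct virtio_net_hdr.
 */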
6945 vi->rx_tnl_csum = true; in virtnet_probe()
6947 vi->rx_tnl = true; in virtnet_probe()
6949 vi->tx_tnl = true; in virtnet_probe()
6953 vi->any_header_sg = true; in virtnet_probe()
6956 vi->has_cvq = true; in virtnet_probe()
6958 mutex_init(&vi->cvq_lock); in virtnet_probe()
6964 if (mtu < dev->min_mtu) { in virtnet_probe()
6968 dev_err(&vdev->dev, in virtnet_probe()
6970 mtu, dev->min_mtu); in virtnet_probe()
6971 err = -EINVAL; in virtnet_probe()
6975 dev->mtu = mtu; in virtnet_probe()
6976 dev->max_mtu = mtu; in virtnet_probe()
6981 if (vi->any_header_sg) in virtnet_probe()
6982 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
6986 vi->curr_queue_pairs = max_queue_pairs; in virtnet_probe()
6988 vi->curr_queue_pairs = num_online_cpus(); in virtnet_probe()
6989 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
6991 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ in virtnet_probe()
6996 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_probe()
6997 vi->intr_coal_rx.max_usecs = 0; in virtnet_probe()
6998 vi->intr_coal_tx.max_usecs = 0; in virtnet_probe()
6999 vi->intr_coal_rx.max_packets = 0; in virtnet_probe()
7004 if (vi->sq[0].napi.weight) in virtnet_probe()
7005 vi->intr_coal_tx.max_packets = 1; in virtnet_probe()
7007 vi->intr_coal_tx.max_packets = 0; in virtnet_probe()
7010 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_probe()
7012 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_probe()
7013 if (vi->sq[i].napi.weight) in virtnet_probe()
7014 vi->sq[i].intr_coal.max_packets = 1; in virtnet_probe()
7022 if (vi->mergeable_rx_bufs) in virtnet_probe()
7023 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; in virtnet_probe()
7025 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
7026 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
7031 vi->failover = net_failover_create(vi->dev); in virtnet_probe()
7032 if (IS_ERR(vi->failover)) { in virtnet_probe()
7033 err = PTR_ERR(vi->failover); in virtnet_probe()
7038 if (vi->has_rss || vi->has_rss_hash_report) in virtnet_probe()
7054 virtio_config_driver_disable(vi->vdev); in virtnet_probe()
7058 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
7060 dev_warn(&vdev->dev, "RSS disabled because committing failed.\n"); in virtnet_probe()
7061 dev->hw_features &= ~NETIF_F_RXHASH; in virtnet_probe()
7062 vi->has_rss_hash_report = false; in virtnet_probe()
7063 vi->has_rss = false; in virtnet_probe()
7067 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_probe()
7074 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { in virtnet_probe()
7077 sg_init_one(&sg, dev->dev_addr, dev->addr_len); in virtnet_probe()
7082 err = -EINVAL; in virtnet_probe()
7087 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) { in virtnet_probe()
7095 err = -ENOMEM; in virtnet_probe()
7106 err = -EINVAL; in virtnet_probe()
7110 v = stats_cap->supported_stats_types[0]; in virtnet_probe()
7111 vi->device_stats_cap = le64_to_cpu(v); in virtnet_probe()
7117 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
7118 virtio_config_changed(vi->vdev); in virtnet_probe()
7120 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
7129 if (virtio_has_feature(vi->vdev, fbit)) in virtnet_probe()
7130 set_bit(guest_offloads[i], &vi->guest_offloads); in virtnet_probe()
7132 vi->guest_offloads_capable = vi->guest_offloads; in virtnet_probe()
7142 pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", in virtnet_probe()
7143 dev->name, max_queue_pairs); in virtnet_probe()
7150 net_failover_destroy(vi->failover); in virtnet_probe()
7153 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
7165 virtio_reset_device(vi->vdev); in remove_vq_common()
7174 for (i = 0; i < vi->max_queue_pairs; i++) in remove_vq_common()
7175 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in remove_vq_common()
7186 struct virtnet_info *vi = vdev->priv; in virtnet_remove()
7191 flush_work(&vi->config_work); in virtnet_remove()
7193 flush_work(&vi->rx_mode_work); in virtnet_remove()
7197 unregister_netdev(vi->dev); in virtnet_remove()
7199 net_failover_destroy(vi->failover); in virtnet_remove()
7203 free_netdev(vi->dev); in virtnet_remove()
7208 struct virtnet_info *vi = vdev->priv; in virtnet_freeze()
7219 struct virtnet_info *vi = vdev->priv; in virtnet_restore()
7225 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()