1 // SPDX-License-Identifier: GPL-2.0-or-later
41 #define VIRTIO_O2F_DELTA (VIRTIO_FEATURES_MAP_MIN - \
67 * at once, the weight is chosen so that the EWMA will be insensitive to short-
134 #define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
135 #define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
180 {#name, offsetof(struct virtio_net_stats_cvq, name), -1}
183 {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
186 {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
542 u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1; in virtnet_rss_hdr_size()
544 return struct_size(vi->rss_hdr, indirection_table, indir_table_size); in virtnet_rss_hdr_size()
549 return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size); in virtnet_rss_trailer_size()
574 return virtqueue_add_outbuf(sq->vq, sq->sg, num, in virtnet_add_outbuf()
598 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in __free_old_xmit()
604 stats->napi_packets++; in __free_old_xmit()
605 stats->napi_bytes += skb->len; in __free_old_xmit()
612 stats->packets++; in __free_old_xmit()
613 stats->bytes += skb->len; in __free_old_xmit()
620 stats->packets++; in __free_old_xmit()
621 stats->bytes += xdp_get_frame_len(frame); in __free_old_xmit()
626 stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr); in __free_old_xmit()
627 stats->xsk++; in __free_old_xmit()
631 netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes); in __free_old_xmit()
641 if (stats->xsk) in virtnet_free_old_xmit()
642 virtnet_xsk_completed(sq, stats->xsk); in virtnet_free_old_xmit()
650 return (vq->index - 1) / 2; in vq2txq()
660 return vq->index / 2; in vq2rxq()
670 if (qid == vi->max_queue_pairs * 2) in vq_type()
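For orientation, the virtio-net device lays its virtqueues out as rx0, tx0, rx1, tx1, ..., with the control queue last; vq2txq()/vq2rxq()/vq_type() above invert that layout. A minimal illustrative sketch of the forward mapping (mirroring the driver's rxq2vq()/txq2vq() convention, shown here only for clarity):

	/* rx queues sit on even virtqueue slots, tx queues on odd slots */
	static inline int rxq2vq(int rxq) { return rxq * 2; }
	static inline int txq2vq(int txq) { return txq * 2 + 1; }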
682 return (struct virtio_net_common_hdr *)skb->cb; in skb_vnet_common_hdr()
693 /* Find end of list, sew whole thing into vi->rq.pages. */ in give_pages()
694 for (end = page; end->private; end = (struct page *)end->private); in give_pages()
695 end->private = (unsigned long)rq->pages; in give_pages()
696 rq->pages = page; in give_pages()
701 struct page *p = rq->pages; in get_a_page()
704 rq->pages = (struct page *)p->private; in get_a_page()
706 p->private = 0; in get_a_page()
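give_pages()/get_a_page() above keep spare big-packet pages on a singly linked list threaded through page->private. A self-contained, userspace-style sketch of the same idea (struct fake_page and the global pool are illustrative stand-ins for struct page and rq->pages, and the push handles one page rather than a whole chain):

	struct fake_page { unsigned long private; };

	static struct fake_page *pool;	/* plays the role of rq->pages */

	static void give_page(struct fake_page *p)
	{
		p->private = (unsigned long)pool;	/* link old head behind new page */
		pool = p;
	}

	static struct fake_page *take_page(void)
	{
		struct fake_page *p = pool;

		if (p) {
			pool = (struct fake_page *)p->private;
			p->private = 0;			/* clear the link before handing out */
		}
		return p;
	}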
715 if (vi->mergeable_rx_bufs) in virtnet_rq_free_buf()
717 else if (vi->big_packets) in virtnet_rq_free_buf()
725 spin_lock_bh(&vi->refill_lock); in enable_delayed_refill()
726 vi->refill_enabled = true; in enable_delayed_refill()
727 spin_unlock_bh(&vi->refill_lock); in enable_delayed_refill()
732 spin_lock_bh(&vi->refill_lock); in disable_delayed_refill()
733 vi->refill_enabled = false; in disable_delayed_refill()
734 spin_unlock_bh(&vi->refill_lock); in disable_delayed_refill()
740 vi->rx_mode_work_enabled = true; in enable_rx_mode_work()
747 vi->rx_mode_work_enabled = false; in disable_rx_mode_work()
780 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done()
781 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
786 if (napi->weight) in skb_xmit_done()
790 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
807 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); in mergeable_ctx_to_truesize()
820 if (len > truesize - room) { in check_mergeable_len()
822 dev->name, len, (unsigned long)(truesize - room)); in check_mergeable_len()
824 return -1; in check_mergeable_len()
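mergeable_ctx_to_truesize() above unpacks the low bits of the per-buffer context pointer; the matching pack/unpack pair looks roughly like the sketch below, assuming the MRG_CTX_HEADER_SHIFT split used elsewhere in the file (headroom in the high bits, truesize in the low bits). Illustrative only:

	static void *mergeable_len_to_ctx(unsigned int truesize, unsigned int headroom)
	{
		/* pack both values into one unsigned long used as the vq context */
		return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
	}

	static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
	{
		return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
	}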
863 hdr_len = vi->hdr_len; in page_to_skb()
864 if (vi->mergeable_rx_bufs) in page_to_skb()
869 buf = p - headroom; in page_to_skb()
870 len -= hdr_len; in page_to_skb()
873 tailroom = truesize - headroom - hdr_padded_len - len; in page_to_skb()
878 skb = virtnet_build_skb(buf, truesize, p - buf, len); in page_to_skb()
882 page = (struct page *)page->private; in page_to_skb()
889 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); in page_to_skb()
893 /* Copy all frame if it fits skb->head, otherwise in page_to_skb()
902 len -= copy; in page_to_skb()
905 if (vi->mergeable_rx_bufs) { in page_to_skb()
915 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); in page_to_skb()
916 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, in page_to_skb()
918 len -= frag_size; in page_to_skb()
919 page = (struct page *)page->private; in page_to_skb()
937 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_unmap()
943 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_unmap()
949 --dma->ref; in virtnet_rq_unmap()
951 if (dma->need_sync && len) { in virtnet_rq_unmap()
952 offset = buf - (head + sizeof(*dma)); in virtnet_rq_unmap()
954 virtqueue_map_sync_single_range_for_cpu(rq->vq, dma->addr, in virtnet_rq_unmap()
959 if (dma->ref) in virtnet_rq_unmap()
962 virtqueue_unmap_single_attrs(rq->vq, dma->addr, dma->len, in virtnet_rq_unmap()
969 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_get_buf()
972 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_get_buf()
974 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx); in virtnet_rq_get_buf()
983 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_init_one_sg()
989 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_init_one_sg()
991 head = page_address(rq->alloc_frag.page); in virtnet_rq_init_one_sg()
993 offset = buf - head; in virtnet_rq_init_one_sg()
997 addr = dma->addr - sizeof(*dma) + offset; in virtnet_rq_init_one_sg()
999 sg_init_table(rq->sg, 1); in virtnet_rq_init_one_sg()
1000 sg_fill_dma(rq->sg, addr, len); in virtnet_rq_init_one_sg()
1005 struct page_frag *alloc_frag = &rq->alloc_frag; in virtnet_rq_alloc()
1006 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_alloc()
1011 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_alloc()
1013 head = page_address(alloc_frag->page); in virtnet_rq_alloc()
1018 if (!alloc_frag->offset) { in virtnet_rq_alloc()
1019 if (rq->last_dma) { in virtnet_rq_alloc()
1024 virtnet_rq_unmap(rq, rq->last_dma, 0); in virtnet_rq_alloc()
1025 rq->last_dma = NULL; in virtnet_rq_alloc()
1028 dma->len = alloc_frag->size - sizeof(*dma); in virtnet_rq_alloc()
1030 addr = virtqueue_map_single_attrs(rq->vq, dma + 1, in virtnet_rq_alloc()
1031 dma->len, DMA_FROM_DEVICE, 0); in virtnet_rq_alloc()
1032 if (virtqueue_map_mapping_error(rq->vq, addr)) in virtnet_rq_alloc()
1035 dma->addr = addr; in virtnet_rq_alloc()
1036 dma->need_sync = virtqueue_map_need_sync(rq->vq, addr); in virtnet_rq_alloc()
1042 get_page(alloc_frag->page); in virtnet_rq_alloc()
1043 dma->ref = 1; in virtnet_rq_alloc()
1044 alloc_frag->offset = sizeof(*dma); in virtnet_rq_alloc()
1046 rq->last_dma = dma; in virtnet_rq_alloc()
1049 ++dma->ref; in virtnet_rq_alloc()
1051 buf = head + alloc_frag->offset; in virtnet_rq_alloc()
1053 get_page(alloc_frag->page); in virtnet_rq_alloc()
1054 alloc_frag->offset += size; in virtnet_rq_alloc()
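virtnet_rq_alloc() above carves receive buffers out of a page fragment whose first bytes hold a small struct virtnet_rq_dma bookkeeping header: dma->ref counts outstanding buffers that still use the mapping, and virtnet_rq_unmap() tears the mapping down once that count reaches zero. A rough layout sketch (illustrative only, not driver code):

	/*
	 *  page fragment (mapped once for DMA):
	 *
	 *   +------------------------+--------+--------+-----
	 *   | struct virtnet_rq_dma  | buf 0  | buf 1  | ...
	 *   +------------------------+--------+--------+-----
	 *     addr, len, ref,          each buffer bumps dma->ref when carved out
	 *     need_sync                and drops it again in virtnet_rq_unmap()
	 */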
1061 struct virtnet_info *vi = vq->vdev->priv; in virtnet_rq_unmap_free_buf()
1065 rq = &vi->rq[i]; in virtnet_rq_unmap_free_buf()
1067 if (rq->xsk_pool) { in virtnet_rq_unmap_free_buf()
1072 if (!vi->big_packets || vi->mergeable_rx_bufs) in virtnet_rq_unmap_free_buf()
1091 u64_stats_update_begin(&sq->stats.syncp); in free_old_xmit()
1092 u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes); in free_old_xmit()
1093 u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets); in free_old_xmit()
1094 u64_stats_update_end(&sq->stats.syncp); in free_old_xmit()
1099 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) in is_xdp_raw_buffer_queue()
1101 else if (q < vi->curr_queue_pairs) in is_xdp_raw_buffer_queue()
1113 qnum = sq - vi->sq; in tx_may_stop()
1121 * the stack to do a non-trivial amount of useless work. in tx_may_stop()
1125 if (sq->vq->num_free < MAX_SKB_FRAGS + 2) { in tx_may_stop()
1129 u64_stats_update_begin(&sq->stats.syncp); in tx_may_stop()
1130 u64_stats_inc(&sq->stats.stop); in tx_may_stop()
1131 u64_stats_update_end(&sq->stats.syncp); in tx_may_stop()
1143 bool use_napi = sq->napi.weight; in check_sq_full_and_disable()
1146 qnum = sq - vi->sq; in check_sq_full_and_disable()
1152 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) in check_sq_full_and_disable()
1153 virtqueue_napi_schedule(&sq->napi, sq->vq); in check_sq_full_and_disable()
1154 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { in check_sq_full_and_disable()
1157 if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) { in check_sq_full_and_disable()
1159 u64_stats_update_begin(&sq->stats.syncp); in check_sq_full_and_disable()
1160 u64_stats_inc(&sq->stats.wake); in check_sq_full_and_disable()
1161 u64_stats_update_end(&sq->stats.syncp); in check_sq_full_and_disable()
1162 virtqueue_disable_cb(sq->vq); in check_sq_full_and_disable()
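tx_may_stop() and check_sq_full_and_disable() above implement the usual stop/wake discipline around a MAX_SKB_FRAGS + 2 descriptor threshold (a worst-case fragmented skb plus the virtio header slot). A condensed sketch of that discipline, kernel context assumed and callback re-arming omitted (the real helpers also re-enable the virtqueue callback as shown above):

	static void tx_ring_guard_sketch(struct net_device *dev, u16 qnum,
					 unsigned int num_free)
	{
		if (num_free < MAX_SKB_FRAGS + 2) {
			/* not enough room for another worst-case skb: stop the queue */
			netif_stop_subqueue(dev, qnum);
		} else if (__netif_subqueue_stopped(dev, qnum)) {
			/* completions freed descriptors again: let the stack resume */
			netif_start_subqueue(dev, qnum);
		}
	}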
1180 * hard_start + XDP_PACKET_HEADROOM - vi->hdr_len in buf_to_xdp()
1181 * The first buffer has virtio header so the remaining region for frame in buf_to_xdp()
1186 * xsk_pool_get_rx_frame_size() + vi->hdr_len in buf_to_xdp()
1188 bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool); in buf_to_xdp()
1190 bufsize += vi->hdr_len; in buf_to_xdp()
1194 vi->dev->name, len, bufsize); in buf_to_xdp()
1195 DEV_STATS_INC(vi->dev, rx_length_errors); in buf_to_xdp()
1203 xdp_prepare_buff(xdp, xdp->data_hard_start, in buf_to_xdp()
1204 XDP_PACKET_HEADROOM - vi->hdr_len, len, 1); in buf_to_xdp()
1205 xdp->flags = 0; in buf_to_xdp()
1216 unsigned int metasize = xdp->data - xdp->data_meta; in xsk_construct_skb()
1220 size = xdp->data_end - xdp->data_hard_start; in xsk_construct_skb()
1221 skb = napi_alloc_skb(&rq->napi, size); in xsk_construct_skb()
1227 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); in xsk_construct_skb()
1229 size = xdp->data_end - xdp->data_meta; in xsk_construct_skb()
1230 memcpy(__skb_put(skb, size), xdp->data_meta, size); in xsk_construct_skb()
1252 prog = rcu_dereference(rq->xdp_prog); in virtnet_receive_xsk_small()
1268 u64_stats_inc(&stats->drops); in virtnet_receive_xsk_small()
1281 while (num_buf-- > 1) { in xsk_drop_follow_bufs()
1282 xdp = virtqueue_get_buf(rq->vq, &len); in xsk_drop_follow_bufs()
1285 dev->name, num_buf); in xsk_drop_follow_bufs()
1289 u64_stats_add(&stats->bytes, len); in xsk_drop_follow_bufs()
1309 while (--num_buf) { in xsk_append_merge_buffer()
1310 buf = virtqueue_get_buf(rq->vq, &len); in xsk_append_merge_buffer()
1313 vi->dev->name, num_buf, in xsk_append_merge_buffer()
1314 virtio16_to_cpu(vi->vdev, in xsk_append_merge_buffer()
1315 hdr->num_buffers)); in xsk_append_merge_buffer()
1316 DEV_STATS_INC(vi->dev, rx_length_errors); in xsk_append_merge_buffer()
1317 return -EINVAL; in xsk_append_merge_buffer()
1320 u64_stats_add(&stats->bytes, len); in xsk_append_merge_buffer()
1332 memcpy(buf, xdp->data, len); in xsk_append_merge_buffer()
1351 xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats); in xsk_append_merge_buffer()
1352 return -EINVAL; in xsk_append_merge_buffer()
1365 hdr = xdp->data - vi->hdr_len; in virtnet_receive_xsk_merge()
1366 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in virtnet_receive_xsk_merge()
1370 prog = rcu_dereference(rq->xdp_prog); in virtnet_receive_xsk_merge()
1407 u64_stats_inc(&stats->drops); in virtnet_receive_xsk_merge()
1416 struct net_device *dev = vi->dev; in virtnet_receive_xsk_buf()
1421 len -= vi->hdr_len; in virtnet_receive_xsk_buf()
1423 u64_stats_add(&stats->bytes, len); in virtnet_receive_xsk_buf()
1430 pr_debug("%s: short packet %i\n", dev->name, len); in virtnet_receive_xsk_buf()
1436 flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags; in virtnet_receive_xsk_buf()
1438 if (!vi->mergeable_rx_bufs) in virtnet_receive_xsk_buf()
1456 xsk_buffs = rq->xsk_buffs; in virtnet_add_recvbuf_xsk()
1458 num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free); in virtnet_add_recvbuf_xsk()
1460 return -ENOMEM; in virtnet_add_recvbuf_xsk()
1462 len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len; in virtnet_add_recvbuf_xsk()
1466 * We assume XDP_PACKET_HEADROOM is larger than hdr->len. in virtnet_add_recvbuf_xsk()
1469 addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len; in virtnet_add_recvbuf_xsk()
1471 sg_init_table(rq->sg, 1); in virtnet_add_recvbuf_xsk()
1472 sg_fill_dma(rq->sg, addr, len); in virtnet_add_recvbuf_xsk()
1474 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, in virtnet_add_recvbuf_xsk()
1505 vi = sq->vq->vdev->priv; in virtnet_xsk_xmit_one()
1507 addr = xsk_buff_raw_get_dma(pool, desc->addr); in virtnet_xsk_xmit_one()
1508 xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len); in virtnet_xsk_xmit_one()
1510 sg_init_table(sq->sg, 2); in virtnet_xsk_xmit_one()
1511 sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len); in virtnet_xsk_xmit_one()
1512 sg_fill_dma(sq->sg + 1, addr, desc->len); in virtnet_xsk_xmit_one()
1514 return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2, in virtnet_xsk_xmit_one()
1515 virtnet_xsk_to_ptr(desc->len), in virtnet_xsk_xmit_one()
1524 struct xdp_desc *descs = pool->tx_descs; in virtnet_xsk_xmit_batch()
1529 budget = min_t(u32, budget, sq->vq->num_free); in virtnet_xsk_xmit_batch()
1538 xsk_tx_completed(sq->xsk_pool, nb_pkts - i); in virtnet_xsk_xmit_batch()
1545 if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xsk_xmit_batch()
1554 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_xsk_xmit()
1556 struct net_device *dev = vi->dev; in virtnet_xsk_xmit()
1563 __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats); in virtnet_xsk_xmit()
1566 xsk_tx_completed(sq->xsk_pool, stats.xsk); in virtnet_xsk_xmit()
1570 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xsk_xmit()
1571 check_sq_full_and_disable(vi, vi->dev, sq); in virtnet_xsk_xmit()
1576 txq = netdev_get_tx_queue(vi->dev, sq - vi->sq); in virtnet_xsk_xmit()
1580 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xsk_xmit()
1581 u64_stats_add(&sq->stats.packets, stats.packets); in virtnet_xsk_xmit()
1582 u64_stats_add(&sq->stats.bytes, stats.bytes); in virtnet_xsk_xmit()
1583 u64_stats_add(&sq->stats.kicks, kicks); in virtnet_xsk_xmit()
1584 u64_stats_add(&sq->stats.xdp_tx, sent); in virtnet_xsk_xmit()
1585 u64_stats_update_end(&sq->stats.syncp); in virtnet_xsk_xmit()
1595 if (napi_if_scheduled_mark_missed(&sq->napi)) in xsk_wakeup()
1599 virtqueue_napi_schedule(&sq->napi, sq->vq); in xsk_wakeup()
1609 return -ENETDOWN; in virtnet_xsk_wakeup()
1611 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_wakeup()
1612 return -EINVAL; in virtnet_xsk_wakeup()
1614 sq = &vi->sq[qid]; in virtnet_xsk_wakeup()
1622 xsk_tx_completed(sq->xsk_pool, num); in virtnet_xsk_completed()
1640 if (unlikely(xdpf->headroom < vi->hdr_len)) in __virtnet_xdp_xmit_one()
1641 return -EOVERFLOW; in __virtnet_xdp_xmit_one()
1645 nr_frags = shinfo->nr_frags; in __virtnet_xdp_xmit_one()
1651 * xdp_return_frame(), which refer to xdpf->data and in __virtnet_xdp_xmit_one()
1652 * xdpf->headroom. Therefore, we need to update the value of in __virtnet_xdp_xmit_one()
1655 xdpf->headroom -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1656 xdpf->data -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1658 hdr = xdpf->data; in __virtnet_xdp_xmit_one()
1659 memset(hdr, 0, vi->hdr_len); in __virtnet_xdp_xmit_one()
1660 xdpf->len += vi->hdr_len; in __virtnet_xdp_xmit_one()
1662 sg_init_table(sq->sg, nr_frags + 1); in __virtnet_xdp_xmit_one()
1663 sg_set_buf(sq->sg, xdpf->data, xdpf->len); in __virtnet_xdp_xmit_one()
1665 skb_frag_t *frag = &shinfo->frags[i]; in __virtnet_xdp_xmit_one()
1667 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag), in __virtnet_xdp_xmit_one()
1673 return -ENOSPC; /* Caller handle free/refcnt */ in __virtnet_xdp_xmit_one()
1678 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1692 if (v->curr_queue_pairs > nr_cpu_ids) { \
1693 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
1695 txq = netdev_get_tx_queue(v->dev, qp); \
1698 qp = cpu % v->curr_queue_pairs; \
1699 txq = netdev_get_tx_queue(v->dev, qp); \
1702 v->sq + qp; \
1709 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
1710 if (v->curr_queue_pairs > nr_cpu_ids) \
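The virtnet_xdp_get_sq()/put() macro fragments above pick the send queue used for XDP_TX and ndo_xdp_xmit: with more queue pairs than CPUs, each CPU gets a dedicated XDP queue placed after the queues the stack uses (no tx lock needed); otherwise queues are shared per CPU modulo and must be taken under the tx lock. A pure arithmetic sketch of the index choice; the per-CPU offset in the dedicated case is assumed from the elided macro lines:

	static unsigned int pick_xdp_qp_sketch(unsigned int curr_queue_pairs,
					       unsigned int xdp_queue_pairs,
					       unsigned int nr_cpus, unsigned int cpu)
	{
		if (curr_queue_pairs > nr_cpus)
			/* dedicated XDP queues live after the stack's queues */
			return curr_queue_pairs - xdp_queue_pairs + cpu;

		/* shared case: spread CPUs across the available queues */
		return cpu % curr_queue_pairs;
	}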
1721 struct receive_queue *rq = vi->rq; in virtnet_xdp_xmit()
1732 xdp_prog = rcu_access_pointer(rq->xdp_prog); in virtnet_xdp_xmit()
1734 return -ENXIO; in virtnet_xdp_xmit()
1739 ret = -EINVAL; in virtnet_xdp_xmit()
1744 virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), in virtnet_xdp_xmit()
1756 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xdp_xmit()
1760 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xdp_xmit()
1764 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xdp_xmit()
1765 u64_stats_add(&sq->stats.bytes, stats.bytes); in virtnet_xdp_xmit()
1766 u64_stats_add(&sq->stats.packets, stats.packets); in virtnet_xdp_xmit()
1767 u64_stats_add(&sq->stats.xdp_tx, n); in virtnet_xdp_xmit()
1768 u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit); in virtnet_xdp_xmit()
1769 u64_stats_add(&sq->stats.kicks, kicks); in virtnet_xdp_xmit()
1770 u64_stats_update_end(&sq->stats.syncp); in virtnet_xdp_xmit()
1784 for (i = 0; i < shinfo->nr_frags; i++) { in put_xdp_frags()
1785 xdp_page = skb_frag_page(&shinfo->frags[i]); in put_xdp_frags()
1801 u64_stats_inc(&stats->xdp_packets); in virtnet_xdp_handler()
1808 u64_stats_inc(&stats->xdp_tx); in virtnet_xdp_handler()
1826 u64_stats_inc(&stats->xdp_redirects); in virtnet_xdp_handler()
1847 return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0; in virtnet_get_headroom()
1857 * with large buffers with sufficient headroom - so it should affect
1888 while (--*num_buf) { in xdp_linearize_page()
1899 off = buf - page_address(p); in xdp_linearize_page()
1921 *len = page_off - XDP_PACKET_HEADROOM; in xdp_linearize_page()
1939 headroom = vi->hdr_len + header_offset; in receive_small_build_skb()
1948 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); in receive_small_build_skb()
1964 unsigned int headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1974 if (unlikely(hdr->hdr.gso_type)) in receive_small_xdp()
1978 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) in receive_small_xdp()
1985 int offset = buf - page_address(page) + header_offset; in receive_small_xdp()
1986 unsigned int tlen = len + vi->hdr_len; in receive_small_xdp()
1991 headroom = vi->hdr_len + header_offset; in receive_small_xdp()
2005 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq); in receive_small_xdp()
2006 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, in receive_small_xdp()
2014 len = xdp.data_end - xdp.data; in receive_small_xdp()
2015 metasize = xdp.data - xdp.data_meta; in receive_small_xdp()
2026 skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len); in receive_small_xdp()
2036 u64_stats_inc(&stats->xdp_drops); in receive_small_xdp()
2038 u64_stats_inc(&stats->drops); in receive_small_xdp()
2056 /* We passed the address of virtnet header to virtio-core, in receive_small()
2059 buf -= VIRTNET_RX_PAD + xdp_headroom; in receive_small()
2061 len -= vi->hdr_len; in receive_small()
2062 u64_stats_add(&stats->bytes, len); in receive_small()
2066 dev->name, len, GOOD_PACKET_LEN); in receive_small()
2071 if (unlikely(vi->xdp_enabled)) { in receive_small()
2075 xdp_prog = rcu_dereference(rq->xdp_prog); in receive_small()
2091 u64_stats_inc(&stats->drops); in receive_small()
2109 if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) { in receive_big()
2111 dev->name, len, in receive_big()
2112 (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE); in receive_big()
2117 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_big()
2124 u64_stats_inc(&stats->drops); in receive_big()
2137 while (num_buf-- > 1) { in mergeable_buf_free()
2141 dev->name, num_buf); in mergeable_buf_free()
2145 u64_stats_add(&stats->bytes, len); in mergeable_buf_free()
2153 * virtio-net there are 2 points that do not match its requirements:
2156 * like eth_type_trans() (which virtio-net does in receive_buf()).
2169 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { in build_skb_from_xdp_buff()
2175 nr_frags = sinfo->nr_frags; in build_skb_from_xdp_buff()
2177 skb = build_skb(xdp->data_hard_start, xdp->frame_sz); in build_skb_from_xdp_buff()
2181 headroom = xdp->data - xdp->data_hard_start; in build_skb_from_xdp_buff()
2182 data_len = xdp->data_end - xdp->data; in build_skb_from_xdp_buff()
2186 metasize = xdp->data - xdp->data_meta; in build_skb_from_xdp_buff()
2192 xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size, in build_skb_from_xdp_buff()
2220 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); in virtnet_build_xdp_buff_mrg()
2221 xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM, in virtnet_build_xdp_buff_mrg()
2222 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); in virtnet_build_xdp_buff_mrg()
2228 /* If we want to build multi-buffer xdp, we need in virtnet_build_xdp_buff_mrg()
2236 shinfo->nr_frags = 0; in virtnet_build_xdp_buff_mrg()
2237 shinfo->xdp_frags_size = 0; in virtnet_build_xdp_buff_mrg()
2241 return -EINVAL; in virtnet_build_xdp_buff_mrg()
2243 while (--*num_buf > 0) { in virtnet_build_xdp_buff_mrg()
2247 dev->name, *num_buf, in virtnet_build_xdp_buff_mrg()
2248 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); in virtnet_build_xdp_buff_mrg()
2253 u64_stats_add(&stats->bytes, len); in virtnet_build_xdp_buff_mrg()
2255 offset = buf - page_address(page); in virtnet_build_xdp_buff_mrg()
2265 frag = &shinfo->frags[shinfo->nr_frags++]; in virtnet_build_xdp_buff_mrg()
2270 shinfo->xdp_frags_size += len; in virtnet_build_xdp_buff_mrg()
2278 return -EINVAL; in virtnet_build_xdp_buff_mrg()
2298 * in-flight packets from before XDP was enabled reach in mergeable_xdp_get_buf()
2301 if (unlikely(hdr->hdr.gso_type)) in mergeable_xdp_get_buf()
2305 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) in mergeable_xdp_get_buf()
2316 (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) { in mergeable_xdp_get_buf()
2323 * In fact, vq reset can be used here to help us clean up in mergeable_xdp_get_buf()
2328 if (!xdp_prog->aux->xdp_has_frags) { in mergeable_xdp_get_buf()
2330 xdp_page = xdp_linearize_page(vi->dev, rq, num_buf, in mergeable_xdp_get_buf()
2370 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable_xdp()
2372 int offset = buf - page_address(page); in receive_mergeable_xdp()
2414 u64_stats_inc(&stats->xdp_drops); in receive_mergeable_xdp()
2415 u64_stats_inc(&stats->drops); in receive_mergeable_xdp()
2427 num_skb_frags = skb_shinfo(curr_skb)->nr_frags; in virtnet_skb_append_frag()
2435 skb_shinfo(curr_skb)->frag_list = nskb; in virtnet_skb_append_frag()
2437 curr_skb->next = nskb; in virtnet_skb_append_frag()
2439 head_skb->truesize += nskb->truesize; in virtnet_skb_append_frag()
2444 head_skb->data_len += len; in virtnet_skb_append_frag()
2445 head_skb->len += len; in virtnet_skb_append_frag()
2446 head_skb->truesize += truesize; in virtnet_skb_append_frag()
2449 offset = buf - page_address(page); in virtnet_skb_append_frag()
2452 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, in virtnet_skb_append_frag()
2472 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
2474 int offset = buf - page_address(page); in receive_mergeable()
2480 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_mergeable()
2485 if (unlikely(vi->xdp_enabled)) { in receive_mergeable()
2489 xdp_prog = rcu_dereference(rq->xdp_prog); in receive_mergeable()
2504 while (--num_buf) { in receive_mergeable()
2508 dev->name, num_buf, in receive_mergeable()
2509 virtio16_to_cpu(vi->vdev, in receive_mergeable()
2510 hdr->num_buffers)); in receive_mergeable()
2515 u64_stats_add(&stats->bytes, len); in receive_mergeable()
2528 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); in receive_mergeable()
2536 u64_stats_inc(&stats->drops); in receive_mergeable()
2544 return __le16_to_cpu(hdr_hash->hash_value_lo) | in virtio_net_hash_value()
2545 (__le16_to_cpu(hdr_hash->hash_value_hi) << 16); in virtio_net_hash_value()
2556 switch (__le16_to_cpu(hdr_hash->hash_report)) { in virtio_skb_set_hash()
2581 struct net_device *dev = vi->dev; in virtnet_receive_done()
2584 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) in virtnet_receive_done()
2585 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); in virtnet_receive_done()
2587 hdr->hdr.flags = flags; in virtnet_receive_done()
2588 if (virtio_net_handle_csum_offload(skb, &hdr->hdr, vi->rx_tnl_csum)) { in virtnet_receive_done()
2590 dev->name, hdr->hdr.flags, in virtnet_receive_done()
2591 hdr->hdr.gso_type, vi->rx_tnl_csum); in virtnet_receive_done()
2595 if (virtio_net_hdr_tnl_to_skb(skb, &hdr->tnl_hdr, vi->rx_tnl, in virtnet_receive_done()
2596 vi->rx_tnl_csum, in virtnet_receive_done()
2597 virtio_is_little_endian(vi->vdev))) { in virtnet_receive_done()
2599 dev->name, hdr->hdr.gso_type, in virtnet_receive_done()
2600 hdr->hdr.gso_size, hdr->hdr.flags, in virtnet_receive_done()
2601 vi->rx_tnl, vi->rx_tnl_csum); in virtnet_receive_done()
2605 skb_record_rx_queue(skb, vq2rxq(rq->vq)); in virtnet_receive_done()
2606 skb->protocol = eth_type_trans(skb, dev); in virtnet_receive_done()
2608 ntohs(skb->protocol), skb->len, skb->pkt_type); in virtnet_receive_done()
2610 napi_gro_receive(&rq->napi, skb); in virtnet_receive_done()
2623 struct net_device *dev = vi->dev; in receive_buf()
2627 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
2628 pr_debug("%s: short packet %i\n", dev->name, len); in receive_buf()
2643 if (vi->mergeable_rx_bufs) { in receive_buf()
2644 flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; in receive_buf()
2647 } else if (vi->big_packets) { in receive_buf()
2650 flags = ((struct virtio_net_common_hdr *)p)->hdr.flags; in receive_buf()
2653 flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; in receive_buf()
2665 * not need to use mergeable_len_to_ctx here - it is enough
2674 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; in add_recvbuf_small()
2680 if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp))) in add_recvbuf_small()
2681 return -ENOMEM; in add_recvbuf_small()
2685 return -ENOMEM; in add_recvbuf_small()
2689 virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN); in add_recvbuf_small()
2691 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp); in add_recvbuf_small()
2707 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); in add_recvbuf_big()
2709 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ in add_recvbuf_big()
2710 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { in add_recvbuf_big()
2715 return -ENOMEM; in add_recvbuf_big()
2717 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); in add_recvbuf_big()
2720 first->private = (unsigned long)list; in add_recvbuf_big()
2727 return -ENOMEM; in add_recvbuf_big()
2731 /* rq->sg[0], rq->sg[1] share the same page */ in add_recvbuf_big()
2732 /* a separate rq->sg[0] for header - required in case !any_header_sg */ in add_recvbuf_big()
2733 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
2735 /* rq->sg[1] for data packet, from offset */ in add_recvbuf_big()
2737 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); in add_recvbuf_big()
2740 first->private = (unsigned long)list; in add_recvbuf_big()
2741 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, in add_recvbuf_big()
2753 struct virtnet_info *vi = rq->vq->vdev->priv; in get_mergeable_buf_len()
2754 const size_t hdr_len = vi->hdr_len; in get_mergeable_buf_len()
2758 return PAGE_SIZE - room; in get_mergeable_buf_len()
2761 rq->min_buf_len, PAGE_SIZE - hdr_len); in get_mergeable_buf_len()
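get_mergeable_buf_len() above sizes each mergeable receive buffer from the EWMA of recent packet lengths, clamped between the queue's minimum and a page minus header/room, then cache-line aligned. A self-contained plain-C sketch of that sizing policy (the page size and alignment constants are illustrative stand-ins):

	#define SKETCH_PAGE_SIZE	4096u
	#define SKETCH_CACHELINE	64u

	static unsigned int mergeable_buf_len_sketch(unsigned int avg_pkt_len,
						     unsigned int hdr_len,
						     unsigned int min_buf_len,
						     unsigned int room)
	{
		unsigned int len;

		if (room)
			return SKETCH_PAGE_SIZE - room;	/* XDP path: fixed, page-bounded */

		len = avg_pkt_len;
		if (len < min_buf_len)
			len = min_buf_len;
		if (len > SKETCH_PAGE_SIZE - hdr_len)
			len = SKETCH_PAGE_SIZE - hdr_len;
		len += hdr_len;

		/* round up to a cache line, as the driver does with ALIGN() */
		return (len + SKETCH_CACHELINE - 1) & ~(SKETCH_CACHELINE - 1);
	}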
2769 struct page_frag *alloc_frag = &rq->alloc_frag; in add_recvbuf_mergeable()
2782 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); in add_recvbuf_mergeable()
2785 return -ENOMEM; in add_recvbuf_mergeable()
2787 if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size) in add_recvbuf_mergeable()
2788 len -= sizeof(struct virtnet_rq_dma); in add_recvbuf_mergeable()
2792 return -ENOMEM; in add_recvbuf_mergeable()
2795 hole = alloc_frag->size - alloc_frag->offset; in add_recvbuf_mergeable()
2805 alloc_frag->offset += hole; in add_recvbuf_mergeable()
2811 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp); in add_recvbuf_mergeable()
2832 if (rq->xsk_pool) { in try_fill_recv()
2833 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp); in try_fill_recv()
2838 if (vi->mergeable_rx_bufs) in try_fill_recv()
2840 else if (vi->big_packets) in try_fill_recv()
2847 } while (rq->vq->num_free); in try_fill_recv()
2850 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { in try_fill_recv()
2853 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); in try_fill_recv()
2854 u64_stats_inc(&rq->stats.kicks); in try_fill_recv()
2855 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); in try_fill_recv()
2858 return err != -ENOMEM; in try_fill_recv()
2863 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done()
2864 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
2866 rq->calls++; in skb_recv_done()
2867 virtqueue_napi_schedule(&rq->napi, rvq); in skb_recv_done()
2886 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_napi_enable()
2887 int qidx = vq2rxq(rq->vq); in virtnet_napi_enable()
2889 virtnet_napi_do_enable(rq->vq, &rq->napi); in virtnet_napi_enable()
2890 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi); in virtnet_napi_enable()
2895 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_napi_tx_enable()
2896 struct napi_struct *napi = &sq->napi; in virtnet_napi_tx_enable()
2897 int qidx = vq2txq(sq->vq); in virtnet_napi_tx_enable()
2899 if (!napi->weight) in virtnet_napi_tx_enable()
2905 if (!vi->affinity_hint_set) { in virtnet_napi_tx_enable()
2906 napi->weight = 0; in virtnet_napi_tx_enable()
2910 virtnet_napi_do_enable(sq->vq, napi); in virtnet_napi_tx_enable()
2911 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi); in virtnet_napi_tx_enable()
2916 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_napi_tx_disable()
2917 struct napi_struct *napi = &sq->napi; in virtnet_napi_tx_disable()
2918 int qidx = vq2txq(sq->vq); in virtnet_napi_tx_disable()
2920 if (napi->weight) { in virtnet_napi_tx_disable()
2921 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL); in virtnet_napi_tx_disable()
2928 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_napi_disable()
2929 struct napi_struct *napi = &rq->napi; in virtnet_napi_disable()
2930 int qidx = vq2rxq(rq->vq); in virtnet_napi_disable()
2932 netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL); in virtnet_napi_disable()
2943 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
2944 struct receive_queue *rq = &vi->rq[i]; in refill_work()
2952 * - cancel refill_work with cancel_delayed_work (note: in refill_work()
2953 * non-sync) in refill_work()
2954 * - cancel refill_work with cancel_delayed_work_sync in in refill_work()
2956 * - wrap all of the work in a lock (perhaps the netdev in refill_work()
2958 * - check netif_running() and return early to avoid a race in refill_work()
2960 napi_disable(&rq->napi); in refill_work()
2962 virtnet_napi_do_enable(rq->vq, &rq->napi); in refill_work()
2968 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
2983 buf = virtqueue_get_buf(rq->vq, &len); in virtnet_receive_xsk_bufs()
3004 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_receive_packets()
3013 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { in virtnet_receive_packets()
3025 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive()
3029 if (rq->xsk_pool) in virtnet_receive()
3034 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { in virtnet_receive()
3036 spin_lock(&vi->refill_lock); in virtnet_receive()
3037 if (vi->refill_enabled) in virtnet_receive()
3038 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
3039 spin_unlock(&vi->refill_lock); in virtnet_receive()
3044 u64_stats_update_begin(&rq->stats.syncp); in virtnet_receive()
3049 item = (u64_stats_t *)((u8 *)&rq->stats + offset); in virtnet_receive()
3054 u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets)); in virtnet_receive()
3055 u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes)); in virtnet_receive()
3057 u64_stats_update_end(&rq->stats.syncp); in virtnet_receive()
3064 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll_cleantx()
3065 unsigned int index = vq2rxq(rq->vq); in virtnet_poll_cleantx()
3066 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx()
3067 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_cleantx()
3069 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
3073 if (sq->reset) { in virtnet_poll_cleantx()
3079 virtqueue_disable_cb(sq->vq); in virtnet_poll_cleantx()
3081 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in virtnet_poll_cleantx()
3083 if (sq->vq->num_free >= MAX_SKB_FRAGS + 2 && in virtnet_poll_cleantx()
3085 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll_cleantx()
3086 u64_stats_inc(&sq->stats.wake); in virtnet_poll_cleantx()
3087 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll_cleantx()
3099 if (!rq->packets_in_napi) in virtnet_rx_dim_update()
3105 dim_update_sample(rq->calls, in virtnet_rx_dim_update()
3106 u64_stats_read(&rq->stats.packets), in virtnet_rx_dim_update()
3107 u64_stats_read(&rq->stats.bytes), in virtnet_rx_dim_update()
3110 net_dim(&rq->dim, &cur_sample); in virtnet_rx_dim_update()
3111 rq->packets_in_napi = 0; in virtnet_rx_dim_update()
3118 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll()
3127 rq->packets_in_napi += received; in virtnet_poll()
3134 napi_complete = virtqueue_napi_complete(napi, rq->vq, received); in virtnet_poll()
3139 if (napi_complete && rq->dim_enabled) in virtnet_poll()
3145 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in virtnet_poll()
3146 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll()
3147 u64_stats_inc(&sq->stats.kicks); in virtnet_poll()
3148 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll()
3158 virtnet_napi_tx_disable(&vi->sq[qp_index]); in virtnet_disable_queue_pair()
3159 virtnet_napi_disable(&vi->rq[qp_index]); in virtnet_disable_queue_pair()
3160 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_disable_queue_pair()
3165 struct net_device *dev = vi->dev; in virtnet_enable_queue_pair()
3168 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, in virtnet_enable_queue_pair()
3169 vi->rq[qp_index].napi.napi_id); in virtnet_enable_queue_pair()
3173 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, in virtnet_enable_queue_pair()
3178 virtnet_napi_enable(&vi->rq[qp_index]); in virtnet_enable_queue_pair()
3179 virtnet_napi_tx_enable(&vi->sq[qp_index]); in virtnet_enable_queue_pair()
3184 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_enable_queue_pair()
3190 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_cancel_dim()
3200 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) in virtnet_update_settings()
3203 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); in virtnet_update_settings()
3206 vi->speed = speed; in virtnet_update_settings()
3208 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); in virtnet_update_settings()
3211 vi->duplex = duplex; in virtnet_update_settings()
3221 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
3222 if (i < vi->curr_queue_pairs) in virtnet_open()
3224 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
3225 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
3232 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_open()
3233 if (vi->status & VIRTIO_NET_S_LINK_UP) in virtnet_open()
3234 netif_carrier_on(vi->dev); in virtnet_open()
3235 virtio_config_driver_enable(vi->vdev); in virtnet_open()
3237 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_open()
3245 cancel_delayed_work_sync(&vi->refill); in virtnet_open()
3247 for (i--; i >= 0; i--) { in virtnet_open()
3249 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_open()
3258 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx()
3259 unsigned int index = vq2txq(sq->vq); in virtnet_poll_tx()
3270 txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_tx()
3272 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3274 if (sq->xsk_pool) in virtnet_poll_tx()
3275 xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget); in virtnet_poll_tx()
3279 if (sq->vq->num_free >= MAX_SKB_FRAGS + 2 && in virtnet_poll_tx()
3281 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll_tx()
3282 u64_stats_inc(&sq->stats.wake); in virtnet_poll_tx()
3283 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll_tx()
3292 opaque = virtqueue_enable_cb_prepare(sq->vq); in virtnet_poll_tx()
3297 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3302 if (unlikely(virtqueue_poll(sq->vq, opaque))) { in virtnet_poll_tx()
3305 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3317 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; in xmit_skb()
3318 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb()
3321 unsigned hdr_len = vi->hdr_len; in xmit_skb()
3324 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
3327 BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr)); in xmit_skb()
3328 BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr.hdr)); in xmit_skb()
3330 can_push = vi->any_header_sg && in xmit_skb()
3331 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && in xmit_skb()
3336 hdr = (struct virtio_net_hdr_v1_hash_tunnel *)(skb->data - in xmit_skb()
3339 hdr = &skb_vnet_common_hdr(skb)->tnl_hdr; in xmit_skb()
3341 if (virtio_net_hdr_tnl_from_skb(skb, hdr, vi->tx_tnl, in xmit_skb()
3342 virtio_is_little_endian(vi->vdev), 0)) in xmit_skb()
3343 return -EPROTO; in xmit_skb()
3345 if (vi->mergeable_rx_bufs) in xmit_skb()
3346 hdr->hash_hdr.hdr.num_buffers = 0; in xmit_skb()
3348 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); in xmit_skb()
3351 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); in xmit_skb()
3357 sg_set_buf(sq->sg, hdr, hdr_len); in xmit_skb()
3358 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); in xmit_skb()
3372 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
3376 bool use_napi = sq->napi.weight; in start_xmit()
3382 virtqueue_disable_cb(sq->vq); in start_xmit()
3394 dev_warn(&dev->dev, in start_xmit()
3413 kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) : in start_xmit()
3416 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in start_xmit()
3417 u64_stats_update_begin(&sq->stats.syncp); in start_xmit()
3418 u64_stats_inc(&sq->stats.kicks); in start_xmit()
3419 u64_stats_update_end(&sq->stats.syncp); in start_xmit()
3423 if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq))) in start_xmit()
3424 virtqueue_napi_schedule(&sq->napi, sq->vq); in start_xmit()
3432 bool running = netif_running(vi->dev); in __virtnet_rx_pause()
3436 virtnet_cancel_dim(vi, &rq->dim); in __virtnet_rx_pause()
3449 cancel_delayed_work_sync(&vi->refill); in virtnet_rx_pause_all()
3450 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_rx_pause_all()
3451 __virtnet_rx_pause(vi, &vi->rq[i]); in virtnet_rx_pause_all()
3461 cancel_delayed_work_sync(&vi->refill); in virtnet_rx_pause()
3469 bool running = netif_running(vi->dev); in __virtnet_rx_resume()
3478 schedule_delayed_work(&vi->refill, 0); in __virtnet_rx_resume()
3486 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_rx_resume_all()
3487 if (i < vi->curr_queue_pairs) in virtnet_rx_resume_all()
3488 __virtnet_rx_resume(vi, &vi->rq[i], true); in virtnet_rx_resume_all()
3490 __virtnet_rx_resume(vi, &vi->rq[i], false); in virtnet_rx_resume_all()
3505 qindex = rq - vi->rq; in virtnet_rx_resize()
3509 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL); in virtnet_rx_resize()
3511 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rx_resize()
3519 bool running = netif_running(vi->dev); in virtnet_tx_pause()
3523 qindex = sq - vi->sq; in virtnet_tx_pause()
3528 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_pause()
3536 sq->reset = true; in virtnet_tx_pause()
3539 netif_stop_subqueue(vi->dev, qindex); in virtnet_tx_pause()
3546 bool running = netif_running(vi->dev); in virtnet_tx_resume()
3550 qindex = sq - vi->sq; in virtnet_tx_resume()
3552 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_resume()
3555 sq->reset = false; in virtnet_tx_resume()
3569 netdev_err(vi->dev, "tx size (%d) cannot be smaller than %d\n", in virtnet_tx_resize()
3571 return -EINVAL; in virtnet_tx_resize()
3574 qindex = sq - vi->sq; in virtnet_tx_resize()
3578 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf, in virtnet_tx_resize()
3581 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_tx_resize()
3603 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command_reply()
3605 mutex_lock(&vi->cvq_lock); in virtnet_send_command_reply()
3606 vi->ctrl->status = ~0; in virtnet_send_command_reply()
3607 vi->ctrl->hdr.class = class; in virtnet_send_command_reply()
3608 vi->ctrl->hdr.cmd = cmd; in virtnet_send_command_reply()
3610 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); in virtnet_send_command_reply()
3617 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); in virtnet_send_command_reply()
3624 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC); in virtnet_send_command_reply()
3626 dev_warn(&vi->vdev->dev, in virtnet_send_command_reply()
3628 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3632 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command_reply()
3638 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command_reply()
3639 !virtqueue_is_broken(vi->cvq)) { in virtnet_send_command_reply()
3645 ok = vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command_reply()
3646 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3659 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
3664 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_set_mac_address()
3665 return -EOPNOTSUPP; in virtnet_set_mac_address()
3669 return -ENOMEM; in virtnet_set_mac_address()
3676 sg_init_one(&sg, addr->sa_data, dev->addr_len); in virtnet_set_mac_address()
3679 dev_warn(&vdev->dev, in virtnet_set_mac_address()
3681 ret = -EINVAL; in virtnet_set_mac_address()
3689 for (i = 0; i < dev->addr_len; i++) in virtnet_set_mac_address()
3692 i, addr->sa_data[i]); in virtnet_set_mac_address()
3710 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_stats()
3712 struct receive_queue *rq = &vi->rq[i]; in virtnet_stats()
3713 struct send_queue *sq = &vi->sq[i]; in virtnet_stats()
3716 start = u64_stats_fetch_begin(&sq->stats.syncp); in virtnet_stats()
3717 tpackets = u64_stats_read(&sq->stats.packets); in virtnet_stats()
3718 tbytes = u64_stats_read(&sq->stats.bytes); in virtnet_stats()
3719 terrors = u64_stats_read(&sq->stats.tx_timeouts); in virtnet_stats()
3720 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); in virtnet_stats()
3723 start = u64_stats_fetch_begin(&rq->stats.syncp); in virtnet_stats()
3724 rpackets = u64_stats_read(&rq->stats.packets); in virtnet_stats()
3725 rbytes = u64_stats_read(&rq->stats.bytes); in virtnet_stats()
3726 rdrops = u64_stats_read(&rq->stats.drops); in virtnet_stats()
3727 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); in virtnet_stats()
3729 tot->rx_packets += rpackets; in virtnet_stats()
3730 tot->tx_packets += tpackets; in virtnet_stats()
3731 tot->rx_bytes += rbytes; in virtnet_stats()
3732 tot->tx_bytes += tbytes; in virtnet_stats()
3733 tot->rx_dropped += rdrops; in virtnet_stats()
3734 tot->tx_errors += terrors; in virtnet_stats()
3737 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped); in virtnet_stats()
3738 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors); in virtnet_stats()
3739 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors); in virtnet_stats()
3740 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors); in virtnet_stats()
3747 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
3757 for (; i < vi->rss_indir_table_size; ++i) { in virtnet_rss_update_by_qpairs()
3759 vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val); in virtnet_rss_update_by_qpairs()
3761 vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs); in virtnet_rss_update_by_qpairs()
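virtnet_rss_update_by_qpairs() above refreshes the RSS indirection table whenever the number of active queue pairs changes. The line computing indir_val is not shown; the usual default is the round-robin spread of ethtool_rxfh_indir_default(), sketched here for illustration:

	/* entry i of the indirection table points at queue (i % queue_pairs) */
	static u16 indir_default_sketch(u32 index, u16 queue_pairs)
	{
		return index % queue_pairs;
	}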
3769 struct net_device *dev = vi->dev; in virtnet_set_queues()
3772 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in virtnet_set_queues()
3782 if (vi->has_rss && !netif_is_rxfh_configured(dev)) { in virtnet_set_queues()
3783 old_rss_hdr = vi->rss_hdr; in virtnet_set_queues()
3784 old_rss_trailer = vi->rss_trailer; in virtnet_set_queues()
3785 vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); in virtnet_set_queues()
3786 if (!vi->rss_hdr) { in virtnet_set_queues()
3787 vi->rss_hdr = old_rss_hdr; in virtnet_set_queues()
3788 return -ENOMEM; in virtnet_set_queues()
3791 *vi->rss_hdr = *old_rss_hdr; in virtnet_set_queues()
3796 devm_kfree(&dev->dev, vi->rss_hdr); in virtnet_set_queues()
3797 vi->rss_hdr = old_rss_hdr; in virtnet_set_queues()
3798 vi->rss_trailer = old_rss_trailer; in virtnet_set_queues()
3800 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n", in virtnet_set_queues()
3802 return -EINVAL; in virtnet_set_queues()
3804 devm_kfree(&dev->dev, old_rss_hdr); in virtnet_set_queues()
3810 return -ENOMEM; in virtnet_set_queues()
3812 mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in virtnet_set_queues()
3817 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", in virtnet_set_queues()
3819 return -EINVAL; in virtnet_set_queues()
3822 vi->curr_queue_pairs = queue_pairs; in virtnet_set_queues()
3824 spin_lock_bh(&vi->refill_lock); in virtnet_set_queues()
3825 if (dev->flags & IFF_UP && vi->refill_enabled) in virtnet_set_queues()
3826 schedule_delayed_work(&vi->refill, 0); in virtnet_set_queues()
3827 spin_unlock_bh(&vi->refill_lock); in virtnet_set_queues()
3839 /* Make sure refill_work doesn't re-enable napi! */ in virtnet_close()
3840 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
3844 virtio_config_driver_disable(vi->vdev); in virtnet_close()
3848 cancel_work_sync(&vi->config_work); in virtnet_close()
3850 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_close()
3852 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_close()
3865 struct net_device *dev = vi->dev; in virtnet_rx_mode_work()
3875 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_rx_mode_work()
3880 dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n"); in virtnet_rx_mode_work()
3886 *promisc_allmulti = !!(dev->flags & IFF_PROMISC); in virtnet_rx_mode_work()
3891 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", in virtnet_rx_mode_work()
3894 *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI); in virtnet_rx_mode_work()
3899 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", in virtnet_rx_mode_work()
3906 /* MAC filter - use one buffer for both lists */ in virtnet_rx_mode_work()
3908 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); in virtnet_rx_mode_work()
3919 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_rx_mode_work()
3922 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); in virtnet_rx_mode_work()
3925 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); in virtnet_rx_mode_work()
3928 mac_data = (void *)&mac_data->macs[uc_count][0]; in virtnet_rx_mode_work()
3930 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_rx_mode_work()
3933 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); in virtnet_rx_mode_work()
3938 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); in virtnet_rx_mode_work()
3942 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); in virtnet_rx_mode_work()
3953 if (vi->rx_mode_work_enabled) in virtnet_set_rx_mode()
3954 schedule_work(&vi->rx_mode_work); in virtnet_set_rx_mode()
3966 return -ENOMEM; in virtnet_vlan_rx_add_vid()
3968 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_add_vid()
3973 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); in virtnet_vlan_rx_add_vid()
3986 return -ENOMEM; in virtnet_vlan_rx_kill_vid()
3988 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_kill_vid()
3993 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); in virtnet_vlan_rx_kill_vid()
4001 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
4002 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
4003 virtqueue_set_affinity(vi->rq[i].vq, NULL); in virtnet_clean_affinity()
4004 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
4007 vi->affinity_hint_set = false; in virtnet_clean_affinity()
4026 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); in virtnet_set_affinity()
4027 stragglers = num_cpu >= vi->curr_queue_pairs ? in virtnet_set_affinity()
4028 num_cpu % vi->curr_queue_pairs : in virtnet_set_affinity()
4031 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_set_affinity()
4035 if (!group_size--) { in virtnet_set_affinity()
4042 virtqueue_set_affinity(vi->rq[i].vq, mask); in virtnet_set_affinity()
4043 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
4044 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); in virtnet_set_affinity()
4048 vi->affinity_hint_set = true; in virtnet_set_affinity()
4083 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
4087 &vi->node_dead); in virtnet_cpu_notif_add()
4090 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
4096 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_remove()
4098 &vi->node_dead); in virtnet_cpu_notif_remove()
4109 return -ENOMEM; in virtnet_send_ctrl_coal_vq_cmd()
4111 coal_vq->vqn = cpu_to_le16(vqn); in virtnet_send_ctrl_coal_vq_cmd()
4112 coal_vq->coal.max_usecs = cpu_to_le32(max_usecs); in virtnet_send_ctrl_coal_vq_cmd()
4113 coal_vq->coal.max_packets = cpu_to_le32(max_packets); in virtnet_send_ctrl_coal_vq_cmd()
4119 return -EINVAL; in virtnet_send_ctrl_coal_vq_cmd()
4130 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_ctrl_coal_vq_cmd()
4131 return -EOPNOTSUPP; in virtnet_send_rx_ctrl_coal_vq_cmd()
4138 vi->rq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_rx_ctrl_coal_vq_cmd()
4139 vi->rq[queue].intr_coal.max_packets = max_packets; in virtnet_send_rx_ctrl_coal_vq_cmd()
4150 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_tx_ctrl_coal_vq_cmd()
4151 return -EOPNOTSUPP; in virtnet_send_tx_ctrl_coal_vq_cmd()
4158 vi->sq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_tx_ctrl_coal_vq_cmd()
4159 vi->sq[queue].intr_coal.max_packets = max_packets; in virtnet_send_tx_ctrl_coal_vq_cmd()
4171 ring->rx_max_pending = vi->rq[0].vq->num_max; in virtnet_get_ringparam()
4172 ring->tx_max_pending = vi->sq[0].vq->num_max; in virtnet_get_ringparam()
4173 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
4174 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
4188 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in virtnet_set_ringparam()
4189 return -EINVAL; in virtnet_set_ringparam()
4191 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_set_ringparam()
4192 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_set_ringparam()
4194 if (ring->rx_pending == rx_pending && in virtnet_set_ringparam()
4195 ring->tx_pending == tx_pending) in virtnet_set_ringparam()
4198 if (ring->rx_pending > vi->rq[0].vq->num_max) in virtnet_set_ringparam()
4199 return -EINVAL; in virtnet_set_ringparam()
4201 if (ring->tx_pending > vi->sq[0].vq->num_max) in virtnet_set_ringparam()
4202 return -EINVAL; in virtnet_set_ringparam()
4204 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_set_ringparam()
4205 rq = vi->rq + i; in virtnet_set_ringparam()
4206 sq = vi->sq + i; in virtnet_set_ringparam()
4208 if (ring->tx_pending != tx_pending) { in virtnet_set_ringparam()
4209 err = virtnet_tx_resize(vi, sq, ring->tx_pending); in virtnet_set_ringparam()
4213 /* Upon disabling and re-enabling a transmit virtqueue, the device must in virtnet_set_ringparam()
4219 vi->intr_coal_tx.max_usecs, in virtnet_set_ringparam()
4220 vi->intr_coal_tx.max_packets); in virtnet_set_ringparam()
4225 if (err && err != -EOPNOTSUPP) in virtnet_set_ringparam()
4229 if (ring->rx_pending != rx_pending) { in virtnet_set_ringparam()
4230 err = virtnet_rx_resize(vi, rq, ring->rx_pending); in virtnet_set_ringparam()
4235 mutex_lock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4237 vi->intr_coal_rx.max_usecs, in virtnet_set_ringparam()
4238 vi->intr_coal_rx.max_packets); in virtnet_set_ringparam()
4239 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4240 if (err && err != -EOPNOTSUPP) in virtnet_set_ringparam()
4250 struct net_device *dev = vi->dev; in virtnet_commit_rss_command()
4255 sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi)); in virtnet_commit_rss_command()
4256 sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi)); in virtnet_commit_rss_command()
4259 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG in virtnet_commit_rss_command()
4266 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); in virtnet_commit_rss_command()
4273 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported); in virtnet_init_default_rss()
4274 vi->rss_hash_types_saved = vi->rss_hash_types_supported; in virtnet_init_default_rss()
4275 vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size in virtnet_init_default_rss()
4276 ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0; in virtnet_init_default_rss()
4277 vi->rss_hdr->unclassified_queue = 0; in virtnet_init_default_rss()
4279 virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); in virtnet_init_default_rss()
4281 vi->rss_trailer.hash_key_length = vi->rss_key_size; in virtnet_init_default_rss()
4283 netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size); in virtnet_init_default_rss()
4291 info->data = 0; in virtnet_get_hashflow()
4292 switch (info->flow_type) { in virtnet_get_hashflow()
4294 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { in virtnet_get_hashflow()
4295 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4297 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4298 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4302 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { in virtnet_get_hashflow()
4303 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4305 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4306 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4310 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { in virtnet_get_hashflow()
4311 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4313 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4314 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4318 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { in virtnet_get_hashflow()
4319 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4321 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4322 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4326 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) in virtnet_get_hashflow()
4327 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4331 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) in virtnet_get_hashflow()
4332 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4336 info->data = 0; in virtnet_get_hashflow()
4348 u32 new_hashtypes = vi->rss_hash_types_saved; in virtnet_set_hashflow()
4349 bool is_disable = info->data & RXH_DISCARD; in virtnet_set_hashflow()
4350 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); in virtnet_set_hashflow()
4353 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) in virtnet_set_hashflow()
4354 return -EINVAL; in virtnet_set_hashflow()
4356 switch (info->flow_type) { in virtnet_set_hashflow()
4393 return -EINVAL; in virtnet_set_hashflow()
4397 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) in virtnet_set_hashflow()
4398 return -EINVAL; in virtnet_set_hashflow()
4400 if (new_hashtypes != vi->rss_hash_types_saved) { in virtnet_set_hashflow()
4401 vi->rss_hash_types_saved = new_hashtypes; in virtnet_set_hashflow()
4402 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); in virtnet_set_hashflow()
4403 if (vi->dev->features & NETIF_F_RXHASH) in virtnet_set_hashflow()
4405 return -EINVAL; in virtnet_set_hashflow()
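/*
 * Usage sketch (not part of the driver; interface name and fields are
 * illustrative): the RXH_* masks handled above map to ethtool's
 * rx-flow-hash interface, e.g.
 *
 *   ethtool -n eth0 rx-flow-hash tcp4        # query hashed fields for TCPv4
 *   ethtool -N eth0 rx-flow-hash udp4 sdfn   # hash on src/dst IP + L4 ports
 *
 * "sd" corresponds to RXH_IP_SRC | RXH_IP_DST, "sdfn" adds RXH_L4_B_0_1 |
 * RXH_L4_B_2_3; any other combination (apart from RXH_DISCARD) is rejected
 * with -EINVAL, and the resulting hash types must stay within
 * rss_hash_types_supported.
 */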
4415 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
4417 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); in virtnet_get_drvinfo()
4418 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); in virtnet_get_drvinfo()
4419 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); in virtnet_get_drvinfo()
4428 u16 queue_pairs = channels->combined_count; in virtnet_set_channels()
4434 if (channels->rx_count || channels->tx_count || channels->other_count) in virtnet_set_channels()
4435 return -EINVAL; in virtnet_set_channels()
4437 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
4438 return -EINVAL; in virtnet_set_channels()
4444 if (vi->rq[0].xdp_prog) in virtnet_set_channels()
4445 return -EINVAL; in virtnet_set_channels()
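/*
 * Usage sketch (interface name and count illustrative): the combined channel
 * count set above selects how many RX/TX virtqueue pairs are active, e.g.
 *
 *   ethtool -l eth0               # show current and maximum combined channels
 *   ethtool -L eth0 combined 4    # use 4 queue pairs
 *
 * Separate rx/tx/other counts, a count of zero, or a count above
 * max_queue_pairs are rejected, and the change is refused while an XDP
 * program is attached.
 */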
4476 /* qid == -1: for rx/tx queue total field */
4487 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_get_stats_string()
4491 virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc); in virtnet_get_stats_string()
4507 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_stats_string()
4514 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_stats_string()
4521 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_get_stats_string()
4541 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_stats_string()
4548 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_stats_string()
4555 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_get_stats_string()
4567 /* The stats are written to qstats or ethtool -S */
4589 ctx->data = data; in virtnet_stats_ctx_init()
4590 ctx->to_qstat = to_qstat; in virtnet_stats_ctx_init()
4593 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); in virtnet_stats_ctx_init()
4594 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); in virtnet_stats_ctx_init()
4598 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4599 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; in virtnet_stats_ctx_init()
4600 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); in virtnet_stats_ctx_init()
4601 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); in virtnet_stats_ctx_init()
4604 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4605 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; in virtnet_stats_ctx_init()
4606 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); in virtnet_stats_ctx_init()
4607 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); in virtnet_stats_ctx_init()
4610 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_stats_ctx_init()
4611 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO; in virtnet_stats_ctx_init()
4612 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); in virtnet_stats_ctx_init()
4613 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso); in virtnet_stats_ctx_init()
4616 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4617 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; in virtnet_stats_ctx_init()
4618 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); in virtnet_stats_ctx_init()
4619 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); in virtnet_stats_ctx_init()
4624 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4625 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; in virtnet_stats_ctx_init()
4626 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); in virtnet_stats_ctx_init()
4627 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); in virtnet_stats_ctx_init()
4630 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_stats_ctx_init()
4631 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM; in virtnet_stats_ctx_init()
4632 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); in virtnet_stats_ctx_init()
4633 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum); in virtnet_stats_ctx_init()
4636 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4637 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; in virtnet_stats_ctx_init()
4638 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); in virtnet_stats_ctx_init()
4639 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); in virtnet_stats_ctx_init()
4642 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4643 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; in virtnet_stats_ctx_init()
4644 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); in virtnet_stats_ctx_init()
4645 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); in virtnet_stats_ctx_init()
4651 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc); in virtnet_stats_ctx_init()
4652 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc); in virtnet_stats_ctx_init()
4654 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_stats_ctx_init()
4657 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ; in virtnet_stats_ctx_init()
4658 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc); in virtnet_stats_ctx_init()
4659 ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq); in virtnet_stats_ctx_init()
4664 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4665 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; in virtnet_stats_ctx_init()
4666 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc); in virtnet_stats_ctx_init()
4667 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); in virtnet_stats_ctx_init()
4670 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4671 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; in virtnet_stats_ctx_init()
4672 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc); in virtnet_stats_ctx_init()
4673 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); in virtnet_stats_ctx_init()
4676 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4677 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; in virtnet_stats_ctx_init()
4678 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc); in virtnet_stats_ctx_init()
4679 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); in virtnet_stats_ctx_init()
4684 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4685 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; in virtnet_stats_ctx_init()
4686 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc); in virtnet_stats_ctx_init()
4687 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); in virtnet_stats_ctx_init()
4690 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4691 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; in virtnet_stats_ctx_init()
4692 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc); in virtnet_stats_ctx_init()
4693 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); in virtnet_stats_ctx_init()
4696 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4697 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; in virtnet_stats_ctx_init()
4698 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc); in virtnet_stats_ctx_init()
4699 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); in virtnet_stats_ctx_init()
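/*
 * Sizing sketch (derived from the branches above): every advertised
 * VIRTIO_NET_STATS_TYPE_* bit adds both ethtool string descriptors and
 * reply-buffer space for its queue type, e.g. for the RX side of the
 * ethtool -S path:
 *
 *   desc_num[RX] = ARRAY_SIZE(virtnet_rq_stats_desc)
 *                + ARRAY_SIZE(virtnet_stats_rx_basic_desc)   (if RX_BASIC)
 *                + ARRAY_SIZE(virtnet_stats_rx_csum_desc)    (if RX_CSUM)
 *                + ARRAY_SIZE(virtnet_stats_rx_speed_desc);  (if RX_SPEED)
 *   size[RX]     = sum of the matching struct virtio_net_stats_rx_* sizes;
 *
 * The qstat path (to_qstat) uses the *_qstat descriptor tables instead and
 * does not account for the CVQ group.
 */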
4703 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
4730 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; in virtnet_fill_total_fields()
4731 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; in virtnet_fill_total_fields()
4732 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; in virtnet_fill_total_fields()
4734 first_rx_q = ctx->data + num_rx + num_tx + num_cq; in virtnet_fill_total_fields()
4735 first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx; in virtnet_fill_total_fields()
4737 data = ctx->data; in virtnet_fill_total_fields()
4739 stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
4741 data = ctx->data + num_rx; in virtnet_fill_total_fields()
4743 stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
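/*
 * Layout sketch of ctx->data as filled for ethtool -S, following the offsets
 * used above:
 *
 *   [0, num_rx)                      RX totals (summed over all queues)
 *   [num_rx, num_rx + num_tx)        TX totals
 *   [..., + num_cq)                  CVQ counters
 *   curr_queue_pairs blocks of num_rx per-RX-queue counters (first_rx_q)
 *   curr_queue_pairs blocks of num_tx per-TX-queue counters (first_tx_q)
 *
 * stats_sum_queue() computes the totals by walking the per-queue regions.
 */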
4758 bitmap = ctx->bitmap[queue_type]; in virtnet_fill_stats_qstat()
4770 offset = desc[i].qstat_offset / sizeof(*ctx->data); in virtnet_fill_stats_qstat()
4772 ctx->data[offset] = u64_stats_read(v_stat); in virtnet_fill_stats_qstat()
4837 offset = desc[i].qstat_offset / sizeof(*ctx->data); in virtnet_fill_stats_qstat()
4839 ctx->data[offset] = le64_to_cpu(*v); in virtnet_fill_stats_qstat()
4843 /* virtnet_fill_stats - copy the stats to qstats or ethtool -S
4864 if (ctx->to_qstat) in virtnet_fill_stats()
4867 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; in virtnet_fill_stats()
4868 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; in virtnet_fill_stats()
4869 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; in virtnet_fill_stats()
4872 bitmap = ctx->bitmap[queue_type]; in virtnet_fill_stats()
4878 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2); in virtnet_fill_stats()
4968 ctx->data[offset + i] = le64_to_cpu(*v); in virtnet_fill_stats()
4976 ctx->data[offset + i] = u64_stats_read(v_stat); in virtnet_fill_stats()
5001 for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) { in __virtnet_get_hw_stats()
5003 qid = le16_to_cpu(hdr->vq_index); in __virtnet_get_hw_stats()
5004 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type); in __virtnet_get_hw_stats()
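/*
 * Reply parsing note: the device answers with a packed sequence of records,
 * each prefixed by a header carrying its type, size and vq_index, so the
 * loop above advances by le16_to_cpu(hdr->size) and hands every record to
 * virtnet_fill_stats() for the queue it belongs to.
 */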
5016 u64 bitmap = ctx->bitmap[qtype]; in virtnet_make_stat_req()
5021 req->stats[*idx].vq_index = cpu_to_le16(qid); in virtnet_make_stat_req()
5022 req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap); in virtnet_make_stat_req()
5026 /* qid == -1: get stats of all vqs. in virtnet_get_hw_stats()
5038 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) in virtnet_get_hw_stats()
5041 if (qid == -1) { in virtnet_get_hw_stats()
5042 last_vq = vi->curr_queue_pairs * 2 - 1; in virtnet_get_hw_stats()
5055 if (ctx->bitmap[qtype]) { in virtnet_get_hw_stats()
5057 res_size += ctx->size[qtype]; in virtnet_get_hw_stats()
5061 if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) { in virtnet_get_hw_stats()
5062 res_size += ctx->size[VIRTNET_Q_TYPE_CQ]; in virtnet_get_hw_stats()
5068 return -ENOMEM; in virtnet_get_hw_stats()
5073 return -ENOMEM; in virtnet_get_hw_stats()
5081 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j); in virtnet_get_hw_stats()
5100 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p); in virtnet_get_strings()
5101 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p); in virtnet_get_strings()
5105 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
5108 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
5127 vi->curr_queue_pairs * pair_count; in virtnet_get_sset_count()
5129 return -EOPNOTSUPP; in virtnet_get_sset_count()
5142 if (virtnet_get_hw_stats(vi, &ctx, -1)) in virtnet_get_ethtool_stats()
5143 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n"); in virtnet_get_ethtool_stats()
5145 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
5146 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_ethtool_stats()
5147 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats()
5149 stats_base = (const u8 *)&rq->stats; in virtnet_get_ethtool_stats()
5151 start = u64_stats_fetch_begin(&rq->stats.syncp); in virtnet_get_ethtool_stats()
5153 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); in virtnet_get_ethtool_stats()
5155 stats_base = (const u8 *)&sq->stats; in virtnet_get_ethtool_stats()
5157 start = u64_stats_fetch_begin(&sq->stats.syncp); in virtnet_get_ethtool_stats()
5159 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); in virtnet_get_ethtool_stats()
5170 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
5171 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
5172 channels->max_other = 0; in virtnet_get_channels()
5173 channels->rx_count = 0; in virtnet_get_channels()
5174 channels->tx_count = 0; in virtnet_get_channels()
5175 channels->other_count = 0; in virtnet_get_channels()
5184 &vi->speed, &vi->duplex); in virtnet_set_link_ksettings()
5192 cmd->base.speed = vi->speed; in virtnet_get_link_ksettings()
5193 cmd->base.duplex = vi->duplex; in virtnet_get_link_ksettings()
5194 cmd->base.port = PORT_OTHER; in virtnet_get_link_ksettings()
5208 return -ENOMEM; in virtnet_send_tx_notf_coal_cmds()
5210 coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); in virtnet_send_tx_notf_coal_cmds()
5211 coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); in virtnet_send_tx_notf_coal_cmds()
5217 return -EINVAL; in virtnet_send_tx_notf_coal_cmds()
5219 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5220 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5221 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_tx_notf_coal_cmds()
5222 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5223 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5233 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; in virtnet_send_rx_notf_coal_cmds()
5237 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_notf_coal_cmds()
5238 return -EOPNOTSUPP; in virtnet_send_rx_notf_coal_cmds()
5240 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || in virtnet_send_rx_notf_coal_cmds()
5241 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) in virtnet_send_rx_notf_coal_cmds()
5242 return -EINVAL; in virtnet_send_rx_notf_coal_cmds()
5244 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5245 vi->rx_dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5246 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5247 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5248 vi->rq[i].dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5249 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5256 return -ENOMEM; in virtnet_send_rx_notf_coal_cmds()
5258 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5259 vi->rx_dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5260 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5261 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5262 vi->rq[i].dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5263 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5267 /* Since the per-queue coalescing params can be set, in virtnet_send_rx_notf_coal_cmds()
5271 coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); in virtnet_send_rx_notf_coal_cmds()
5272 coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_cmds()
5278 return -EINVAL; in virtnet_send_rx_notf_coal_cmds()
5280 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5281 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5282 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5283 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5284 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5285 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5286 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5312 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; in virtnet_send_rx_notf_coal_vq_cmds()
5317 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5318 cur_rx_dim = vi->rq[queue].dim_enabled; in virtnet_send_rx_notf_coal_vq_cmds()
5319 max_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_send_rx_notf_coal_vq_cmds()
5320 max_packets = vi->rq[queue].intr_coal.max_packets; in virtnet_send_rx_notf_coal_vq_cmds()
5322 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || in virtnet_send_rx_notf_coal_vq_cmds()
5323 ec->rx_max_coalesced_frames != max_packets)) { in virtnet_send_rx_notf_coal_vq_cmds()
5324 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5325 return -EINVAL; in virtnet_send_rx_notf_coal_vq_cmds()
5329 vi->rq[queue].dim_enabled = true; in virtnet_send_rx_notf_coal_vq_cmds()
5330 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5335 vi->rq[queue].dim_enabled = false; in virtnet_send_rx_notf_coal_vq_cmds()
5341 ec->rx_coalesce_usecs, in virtnet_send_rx_notf_coal_vq_cmds()
5342 ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_vq_cmds()
5343 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5358 ec->tx_coalesce_usecs, in virtnet_send_notf_coal_vq_cmds()
5359 ec->tx_max_coalesced_frames); in virtnet_send_notf_coal_vq_cmds()
5371 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rx_dim_work()
5372 struct net_device *dev = vi->dev; in virtnet_rx_dim_work()
5376 qnum = rq - vi->rq; in virtnet_rx_dim_work()
5378 mutex_lock(&rq->dim_lock); in virtnet_rx_dim_work()
5379 if (!rq->dim_enabled) in virtnet_rx_dim_work()
5383 if (update_moder.usec != rq->intr_coal.max_usecs || in virtnet_rx_dim_work()
5384 update_moder.pkts != rq->intr_coal.max_packets) { in virtnet_rx_dim_work()
5390 dev->name, qnum); in virtnet_rx_dim_work()
5393 dim->state = DIM_START_MEASURE; in virtnet_rx_dim_work()
5394 mutex_unlock(&rq->dim_lock); in virtnet_rx_dim_work()
5402 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) in virtnet_coal_params_supported()
5403 return -EOPNOTSUPP; in virtnet_coal_params_supported()
5405 if (ec->tx_max_coalesced_frames > 1 || in virtnet_coal_params_supported()
5406 ec->rx_max_coalesced_frames != 1) in virtnet_coal_params_supported()
5407 return -EINVAL; in virtnet_coal_params_supported()
5417 return -EBUSY; in virtnet_should_update_vq_weight()
5434 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; in virtnet_set_coalesce()
5435 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { in virtnet_set_coalesce()
5436 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, in virtnet_set_coalesce()
5437 vi->sq[queue_number].napi.weight, in virtnet_set_coalesce()
5443 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be in virtnet_set_coalesce()
5450 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) in virtnet_set_coalesce()
5462 for (i = queue_number; i < vi->max_queue_pairs; i++) { in virtnet_set_coalesce()
5463 if (vi->sq[i].xsk_pool) in virtnet_set_coalesce()
5464 return -EBUSY; in virtnet_set_coalesce()
5467 for (; queue_number < vi->max_queue_pairs; queue_number++) in virtnet_set_coalesce()
5468 vi->sq[queue_number].napi.weight = napi_weight; in virtnet_set_coalesce()
5481 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_get_coalesce()
5482 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; in virtnet_get_coalesce()
5483 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; in virtnet_get_coalesce()
5484 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; in virtnet_get_coalesce()
5485 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; in virtnet_get_coalesce()
5486 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; in virtnet_get_coalesce()
5488 ec->rx_max_coalesced_frames = 1; in virtnet_get_coalesce()
5490 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
5491 ec->tx_max_coalesced_frames = 1; in virtnet_get_coalesce()
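/*
 * Usage sketch (interface name and values illustrative): with
 * VIRTIO_NET_F_NOTF_COAL the device-wide moderation handled above maps to
 *
 *   ethtool -C eth0 rx-usecs 8 rx-frames 64 tx-usecs 16 tx-frames 32
 *   ethtool -C eth0 adaptive-rx on   # DIM; also needs VIRTIO_NET_F_VQ_NOTF_COAL
 *   ethtool -c eth0                  # read back vi->intr_coal_rx/intr_coal_tx
 *
 * Without the feature only the legacy tx-frames 0/1 toggle of the TX NAPI
 * weight is accepted (see virtnet_coal_params_supported()).
 */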
5505 if (queue >= vi->max_queue_pairs) in virtnet_set_per_queue_coalesce()
5506 return -EINVAL; in virtnet_set_per_queue_coalesce()
5509 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; in virtnet_set_per_queue_coalesce()
5510 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, in virtnet_set_per_queue_coalesce()
5511 vi->sq[queue].napi.weight, in virtnet_set_per_queue_coalesce()
5516 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_set_per_queue_coalesce()
5525 vi->sq[queue].napi.weight = napi_weight; in virtnet_set_per_queue_coalesce()
5536 if (queue >= vi->max_queue_pairs) in virtnet_get_per_queue_coalesce()
5537 return -EINVAL; in virtnet_get_per_queue_coalesce()
5539 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_get_per_queue_coalesce()
5540 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5541 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5542 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5543 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5544 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5545 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; in virtnet_get_per_queue_coalesce()
5546 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5548 ec->rx_max_coalesced_frames = 1; in virtnet_get_per_queue_coalesce()
5550 if (vi->sq[queue].napi.weight) in virtnet_get_per_queue_coalesce()
5551 ec->tx_max_coalesced_frames = 1; in virtnet_get_per_queue_coalesce()
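/*
 * Per-queue variant (queue mask and values illustrative), available when
 * VIRTIO_NET_F_VQ_NOTF_COAL is negotiated:
 *
 *   ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 10 rx-frames 16
 *   ethtool --per-queue eth0 queue_mask 0x1 --show-coalesce
 *
 * The values live in vi->rq[queue].intr_coal and vi->sq[queue].intr_coal and
 * are read back under dim_lock, as above.
 */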
5561 vi->speed = SPEED_UNKNOWN; in virtnet_init_settings()
5562 vi->duplex = DUPLEX_UNKNOWN; in virtnet_init_settings()
5567 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; in virtnet_get_rxfh_key_size()
5572 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; in virtnet_get_rxfh_indir_size()
5581 if (rxfh->indir) { in virtnet_get_rxfh()
5582 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_get_rxfh()
5583 rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]); in virtnet_get_rxfh()
5586 if (rxfh->key) in virtnet_get_rxfh()
5587 memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size); in virtnet_get_rxfh()
5589 rxfh->hfunc = ETH_RSS_HASH_TOP; in virtnet_get_rxfh()
5602 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && in virtnet_set_rxfh()
5603 rxfh->hfunc != ETH_RSS_HASH_TOP) in virtnet_set_rxfh()
5604 return -EOPNOTSUPP; in virtnet_set_rxfh()
5606 if (rxfh->indir) { in virtnet_set_rxfh()
5607 if (!vi->has_rss) in virtnet_set_rxfh()
5608 return -EOPNOTSUPP; in virtnet_set_rxfh()
5610 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_set_rxfh()
5611 vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]); in virtnet_set_rxfh()
5615 if (rxfh->key) { in virtnet_set_rxfh()
5620 if (!vi->has_rss && !vi->has_rss_hash_report) in virtnet_set_rxfh()
5621 return -EOPNOTSUPP; in virtnet_set_rxfh()
5623 memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size); in virtnet_set_rxfh()
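/*
 * Usage sketch (interface name, queue count and key bytes illustrative): the
 * indirection table and hash key handled above are the ones ethtool manages
 * with -x/-X:
 *
 *   ethtool -x eth0                 # dump indirection table and hash key
 *   ethtool -X eth0 equal 4         # spread the table over queues 0..3
 *   ethtool -X eth0 hkey <colon-separated bytes, rss_key_size of them>
 *
 * Only the Toeplitz hash (ETH_RSS_HASH_TOP) is accepted; changing the
 * indirection table needs vi->has_rss, and setting the key needs has_rss or
 * has_rss_hash_report.
 */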
5637 return vi->curr_queue_pairs; in virtnet_get_rx_ring_count()
5672 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_queue_stats_rx()
5678 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0); in virtnet_get_queue_stats_rx()
5685 struct send_queue *sq = &vi->sq[i]; in virtnet_get_queue_stats_tx()
5691 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0); in virtnet_get_queue_stats_tx()
5700 /* The queue stats of the virtio-net device will not be reset. So here we in virtnet_get_base_stats()
5703 rx->bytes = 0; in virtnet_get_base_stats()
5704 rx->packets = 0; in virtnet_get_base_stats()
5706 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_base_stats()
5707 rx->hw_drops = 0; in virtnet_get_base_stats()
5708 rx->hw_drop_overruns = 0; in virtnet_get_base_stats()
5711 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_base_stats()
5712 rx->csum_unnecessary = 0; in virtnet_get_base_stats()
5713 rx->csum_none = 0; in virtnet_get_base_stats()
5714 rx->csum_bad = 0; in virtnet_get_base_stats()
5717 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_get_base_stats()
5718 rx->hw_gro_packets = 0; in virtnet_get_base_stats()
5719 rx->hw_gro_bytes = 0; in virtnet_get_base_stats()
5720 rx->hw_gro_wire_packets = 0; in virtnet_get_base_stats()
5721 rx->hw_gro_wire_bytes = 0; in virtnet_get_base_stats()
5724 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) in virtnet_get_base_stats()
5725 rx->hw_drop_ratelimits = 0; in virtnet_get_base_stats()
5727 tx->bytes = 0; in virtnet_get_base_stats()
5728 tx->packets = 0; in virtnet_get_base_stats()
5729 tx->stop = 0; in virtnet_get_base_stats()
5730 tx->wake = 0; in virtnet_get_base_stats()
5732 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_base_stats()
5733 tx->hw_drops = 0; in virtnet_get_base_stats()
5734 tx->hw_drop_errors = 0; in virtnet_get_base_stats()
5737 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_get_base_stats()
5738 tx->csum_none = 0; in virtnet_get_base_stats()
5739 tx->needs_csum = 0; in virtnet_get_base_stats()
5742 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_base_stats()
5743 tx->hw_gso_packets = 0; in virtnet_get_base_stats()
5744 tx->hw_gso_bytes = 0; in virtnet_get_base_stats()
5745 tx->hw_gso_wire_packets = 0; in virtnet_get_base_stats()
5746 tx->hw_gso_wire_bytes = 0; in virtnet_get_base_stats()
5749 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) in virtnet_get_base_stats()
5750 tx->hw_drop_ratelimits = 0; in virtnet_get_base_stats()
5753 dev->real_num_rx_queues, vi->max_queue_pairs, rx, in virtnet_get_base_stats()
5754 dev->real_num_tx_queues, vi->max_queue_pairs, tx); in virtnet_get_base_stats()
5765 struct virtnet_info *vi = vdev->priv; in virtnet_freeze_down()
5768 flush_work(&vi->config_work); in virtnet_freeze_down()
5770 flush_work(&vi->rx_mode_work); in virtnet_freeze_down()
5772 if (netif_running(vi->dev)) { in virtnet_freeze_down()
5774 virtnet_close(vi->dev); in virtnet_freeze_down()
5778 netif_tx_lock_bh(vi->dev); in virtnet_freeze_down()
5779 netif_device_detach(vi->dev); in virtnet_freeze_down()
5780 netif_tx_unlock_bh(vi->dev); in virtnet_freeze_down()
5787 struct virtnet_info *vi = vdev->priv; in virtnet_restore_up()
5799 if (netif_running(vi->dev)) { in virtnet_restore_up()
5801 err = virtnet_open(vi->dev); in virtnet_restore_up()
5807 netif_tx_lock_bh(vi->dev); in virtnet_restore_up()
5808 netif_device_attach(vi->dev); in virtnet_restore_up()
5809 netif_tx_unlock_bh(vi->dev); in virtnet_restore_up()
5820 return -ENOMEM; in virtnet_set_guest_offloads()
5822 *_offloads = cpu_to_virtio64(vi->vdev, offloads); in virtnet_set_guest_offloads()
5828 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); in virtnet_set_guest_offloads()
5829 return -EINVAL; in virtnet_set_guest_offloads()
5839 if (!vi->guest_offloads) in virtnet_clear_guest_offloads()
5847 u64 offloads = vi->guest_offloads; in virtnet_restore_guest_offloads()
5849 if (!vi->guest_offloads) in virtnet_restore_guest_offloads()
5860 qindex = rq - vi->rq; in virtnet_rq_bind_xsk_pool()
5863 err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id); in virtnet_rq_bind_xsk_pool()
5867 err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info, in virtnet_rq_bind_xsk_pool()
5872 xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info); in virtnet_rq_bind_xsk_pool()
5877 err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL); in virtnet_rq_bind_xsk_pool()
5879 netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rq_bind_xsk_pool()
5884 rq->xsk_pool = pool; in virtnet_rq_bind_xsk_pool()
5892 xdp_rxq_info_unreg(&rq->xsk_rxq_info); in virtnet_rq_bind_xsk_pool()
5902 qindex = sq - vi->sq; in virtnet_sq_bind_xsk_pool()
5906 err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf, in virtnet_sq_bind_xsk_pool()
5909 netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_sq_bind_xsk_pool()
5913 sq->xsk_pool = pool; in virtnet_sq_bind_xsk_pool()
5931 if (vi->hdr_len > xsk_pool_get_headroom(pool)) in virtnet_xsk_pool_enable()
5932 return -EINVAL; in virtnet_xsk_pool_enable()
5937 if (vi->big_packets && !vi->mergeable_rx_bufs) in virtnet_xsk_pool_enable()
5938 return -ENOENT; in virtnet_xsk_pool_enable()
5940 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_enable()
5941 return -EINVAL; in virtnet_xsk_pool_enable()
5943 sq = &vi->sq[qid]; in virtnet_xsk_pool_enable()
5944 rq = &vi->rq[qid]; in virtnet_xsk_pool_enable()
5946 /* xsk assumes that tx and rx must have the same dma device. The af-xdp in virtnet_xsk_pool_enable()
5950 * But vq->dma_dev allows each vq to have its own dma dev. So we in virtnet_xsk_pool_enable()
5953 if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq)) in virtnet_xsk_pool_enable()
5954 return -EINVAL; in virtnet_xsk_pool_enable()
5956 dma_dev = virtqueue_dma_dev(rq->vq); in virtnet_xsk_pool_enable()
5958 return -EINVAL; in virtnet_xsk_pool_enable()
5960 size = virtqueue_get_vring_size(rq->vq); in virtnet_xsk_pool_enable()
5962 rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL); in virtnet_xsk_pool_enable()
5963 if (!rq->xsk_buffs) in virtnet_xsk_pool_enable()
5964 return -ENOMEM; in virtnet_xsk_pool_enable()
5966 hdr_dma = virtqueue_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len, in virtnet_xsk_pool_enable()
5968 if (virtqueue_map_mapping_error(sq->vq, hdr_dma)) { in virtnet_xsk_pool_enable()
5969 err = -ENOMEM; in virtnet_xsk_pool_enable()
5988 sq->xsk_hdr_dma_addr = hdr_dma; in virtnet_xsk_pool_enable()
5997 virtqueue_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len, in virtnet_xsk_pool_enable()
6000 kvfree(rq->xsk_buffs); in virtnet_xsk_pool_enable()
6012 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_disable()
6013 return -EINVAL; in virtnet_xsk_pool_disable()
6015 sq = &vi->sq[qid]; in virtnet_xsk_pool_disable()
6016 rq = &vi->rq[qid]; in virtnet_xsk_pool_disable()
6018 pool = rq->xsk_pool; in virtnet_xsk_pool_disable()
6025 virtqueue_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr, in virtnet_xsk_pool_disable()
6026 vi->hdr_len, DMA_TO_DEVICE, 0); in virtnet_xsk_pool_disable()
6027 kvfree(rq->xsk_buffs); in virtnet_xsk_pool_disable()
6034 if (xdp->xsk.pool) in virtnet_xsk_pool_setup()
6035 return virtnet_xsk_pool_enable(dev, xdp->xsk.pool, in virtnet_xsk_pool_setup()
6036 xdp->xsk.queue_id); in virtnet_xsk_pool_setup()
6038 return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id); in virtnet_xsk_pool_setup()
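/*
 * Binding sketch: these handlers are reached through an XDP_SETUP_XSK_POOL
 * ndo_bpf request, issued when an AF_XDP socket is bound to (dev, queue_id)
 * in zero-copy mode. Enabling a pool requires xsk headroom >= vi->hdr_len
 * and that the rx and tx virtqueues of the pair share a DMA device, and it
 * allocates one xsk_buff pointer per rx vring entry.
 */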
6046 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; in virtnet_xdp_set()
6052 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) in virtnet_xdp_set()
6053 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_xdp_set()
6054 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_xdp_set()
6055 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_xdp_set()
6056 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_xdp_set()
6057 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || in virtnet_xdp_set()
6058 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || in virtnet_xdp_set()
6059 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { in virtnet_xdp_set()
6061 return -EOPNOTSUPP; in virtnet_xdp_set()
6064 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { in virtnet_xdp_set()
6066 return -EINVAL; in virtnet_xdp_set()
6069 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { in virtnet_xdp_set()
6071 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); in virtnet_xdp_set()
6072 return -EINVAL; in virtnet_xdp_set()
6075 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; in virtnet_xdp_set()
6080 if (curr_qp + xdp_qp > vi->max_queue_pairs) { in virtnet_xdp_set()
6082 curr_qp + xdp_qp, vi->max_queue_pairs); in virtnet_xdp_set()
6086 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); in virtnet_xdp_set()
6091 bpf_prog_add(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
6097 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6098 virtnet_napi_tx_disable(&vi->sq[i]); in virtnet_xdp_set()
6102 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6103 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
6114 vi->xdp_queue_pairs = xdp_qp; in virtnet_xdp_set()
6117 vi->xdp_enabled = true; in virtnet_xdp_set()
6118 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6119 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
6127 vi->xdp_enabled = false; in virtnet_xdp_set()
6131 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
6135 virtnet_napi_tx_enable(&vi->sq[i]); in virtnet_xdp_set()
6143 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6144 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); in virtnet_xdp_set()
6149 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
6150 virtnet_napi_tx_enable(&vi->sq[i]); in virtnet_xdp_set()
6153 bpf_prog_sub(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
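/*
 * Attach sketch (object and section names illustrative): the checks above
 * run when a program is attached through the normal XDP netlink path, e.g.
 *
 *   ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *
 * Attaching fails when guest offloads (TSO/UFO/USO/csum) are negotiated
 * without VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, when mergeable buffers are used
 * without any_header_sg, when a single-buffer program meets an MTU above
 * PAGE_SIZE - headroom - ETH_HLEN, or when no spare queue pairs are left
 * for the dedicated XDP TX queues.
 */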
6159 switch (xdp->command) { in virtnet_xdp()
6161 return virtnet_xdp_set(dev, xdp->prog, xdp->extack); in virtnet_xdp()
6165 return -EINVAL; in virtnet_xdp()
6175 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_get_phys_port_name()
6176 return -EOPNOTSUPP; in virtnet_get_phys_port_name()
6180 return -EOPNOTSUPP; in virtnet_get_phys_port_name()
6192 if ((dev->features ^ features) & NETIF_F_GRO_HW) { in virtnet_set_features()
6193 if (vi->xdp_enabled) in virtnet_set_features()
6194 return -EBUSY; in virtnet_set_features()
6197 offloads = vi->guest_offloads_capable; in virtnet_set_features()
6199 offloads = vi->guest_offloads_capable & in virtnet_set_features()
6205 vi->guest_offloads = offloads; in virtnet_set_features()
6208 if ((dev->features ^ features) & NETIF_F_RXHASH) { in virtnet_set_features()
6210 vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); in virtnet_set_features()
6212 vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE); in virtnet_set_features()
6215 return -EINVAL; in virtnet_set_features()
6224 struct send_queue *sq = &priv->sq[txqueue]; in virtnet_tx_timeout()
6227 u64_stats_update_begin(&sq->stats.syncp); in virtnet_tx_timeout()
6228 u64_stats_inc(&sq->stats.tx_timeouts); in virtnet_tx_timeout()
6229 u64_stats_update_end(&sq->stats.syncp); in virtnet_tx_timeout()
6231 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", in virtnet_tx_timeout()
6232 txqueue, sq->name, sq->vq->index, sq->vq->name, in virtnet_tx_timeout()
6233 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); in virtnet_tx_timeout()
6243 ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags, in virtnet_init_irq_moder()
6250 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_init_irq_moder()
6251 net_dim_setting(vi->dev, &vi->rq[i].dim, false); in virtnet_init_irq_moder()
6258 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_free_irq_moder()
6262 net_dim_free_irq_moder(vi->dev); in virtnet_free_irq_moder()
6291 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
6296 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
6303 if (vi->status == v) in virtnet_config_changed_work()
6306 vi->status = v; in virtnet_config_changed_work()
6308 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
6310 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
6311 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
6313 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
6314 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
6320 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed()
6322 schedule_work(&vi->config_work); in virtnet_config_changed()
6329 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
6330 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
6331 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
6335 * we need to respect an RCU grace period before freeing vi->rq in virtnet_free_queues()
6339 kfree(vi->rq); in virtnet_free_queues()
6340 kfree(vi->sq); in virtnet_free_queues()
6341 kfree(vi->ctrl); in virtnet_free_queues()
6349 for (i = 0; i < vi->max_queue_pairs; i++) { in _free_receive_bufs()
6350 while (vi->rq[i].pages) in _free_receive_bufs()
6351 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in _free_receive_bufs()
6353 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); in _free_receive_bufs()
6354 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); in _free_receive_bufs()
6370 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
6371 if (vi->rq[i].alloc_frag.page) { in free_receive_page_frags()
6372 if (vi->rq[i].last_dma) in free_receive_page_frags()
6373 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); in free_receive_page_frags()
6374 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
6380 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf()
6384 sq = &vi->sq[i]; in virtnet_sq_free_unused_buf()
6397 xsk_tx_completed(sq->xsk_pool, 1); in virtnet_sq_free_unused_buf()
6404 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf_done()
6407 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in virtnet_sq_free_unused_buf_done()
6415 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6416 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
6422 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6423 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
6433 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
6437 vdev->config->del_vqs(vdev); in virtnet_del_vqs()
6448 const unsigned int hdr_len = vi->hdr_len; in mergeable_min_buf_len()
6450 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; in mergeable_min_buf_len()
6454 return max(max(min_buf_len, hdr_len) - hdr_len, in mergeable_min_buf_len()
6462 int ret = -ENOMEM; in virtnet_find_vqs()
6468 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by in virtnet_find_vqs()
6471 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
6472 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
6481 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_find_vqs()
6490 if (vi->has_cvq) { in virtnet_find_vqs()
6491 vqs_info[total_vqs - 1].name = "control"; in virtnet_find_vqs()
6495 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6498 sprintf(vi->rq[i].name, "input.%u", i); in virtnet_find_vqs()
6499 sprintf(vi->sq[i].name, "output.%u", i); in virtnet_find_vqs()
6500 vqs_info[rxq2vq(i)].name = vi->rq[i].name; in virtnet_find_vqs()
6501 vqs_info[txq2vq(i)].name = vi->sq[i].name; in virtnet_find_vqs()
6506 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL); in virtnet_find_vqs()
6510 if (vi->has_cvq) { in virtnet_find_vqs()
6511 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
6512 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
6513 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
6516 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6517 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
6518 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); in virtnet_find_vqs()
6519 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
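/*
 * Virtqueue layout sketch (per the virtio-net spec and the indexing above),
 * for max_queue_pairs = N:
 *
 *   vq 0 .. 2N-1 : rx0, tx0, rx1, tx1, ..., rx(N-1), tx(N-1)
 *   vq 2N        : control queue, present only with VIRTIO_NET_F_CTRL_VQ
 *
 * rxq2vq(i)/txq2vq(i) translate a queue-pair index into these slots when the
 * queues are named and wired up.
 */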
6539 if (vi->has_cvq) { in virtnet_alloc_queues()
6540 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); in virtnet_alloc_queues()
6541 if (!vi->ctrl) in virtnet_alloc_queues()
6544 vi->ctrl = NULL; in virtnet_alloc_queues()
6546 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
6547 if (!vi->sq) in virtnet_alloc_queues()
6549 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); in virtnet_alloc_queues()
6550 if (!vi->rq) in virtnet_alloc_queues()
6553 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
6554 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
6555 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
6556 netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
6558 vi->rq[i].napi.weight = napi_weight; in virtnet_alloc_queues()
6559 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, in virtnet_alloc_queues()
6563 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
6564 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
6565 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
6567 u64_stats_init(&vi->rq[i].stats.syncp); in virtnet_alloc_queues()
6568 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
6569 mutex_init(&vi->rq[i].dim_lock); in virtnet_alloc_queues()
6575 kfree(vi->sq); in virtnet_alloc_queues()
6577 kfree(vi->ctrl); in virtnet_alloc_queues()
6579 return -ENOMEM; in virtnet_alloc_queues()
6611 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show()
6617 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
6618 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
6620 get_mergeable_buf_len(&vi->rq[queue_index], avg, in mergeable_rx_buffer_size_show()
6645 dev_err(&vdev->dev, "device advertises feature %s but not %s", in virtnet_fail_on_feature()
6685 if (!vdev->config->get) { in virtnet_validate()
6686 dev_err(&vdev->dev, "%s failure: config access disabled\n", in virtnet_validate()
6688 return -EINVAL; in virtnet_validate()
6692 return -EINVAL; in virtnet_validate()
6704 …dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, dis… in virtnet_validate()
6713 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_check_guest_gso()
6714 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_check_guest_gso()
6715 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_check_guest_gso()
6716 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_check_guest_gso()
6717 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && in virtnet_check_guest_gso()
6718 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); in virtnet_check_guest_gso()
6730 vi->big_packets = true; in virtnet_set_big_packets()
6731 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); in virtnet_set_big_packets()
6758 if (!(xdp->rxq->dev->features & NETIF_F_RXHASH)) in virtnet_xdp_rx_hash()
6759 return -ENODATA; in virtnet_xdp_rx_hash()
6761 vi = netdev_priv(xdp->rxq->dev); in virtnet_xdp_rx_hash()
6762 hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len); in virtnet_xdp_rx_hash()
6763 hash_report = __le16_to_cpu(hdr_hash->hash_report); in virtnet_xdp_rx_hash()
6779 int i, err = -ENOMEM; in virtnet_probe()
6800 return -ENOMEM; in virtnet_probe()
6803 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | in virtnet_probe()
6805 dev->netdev_ops = &virtnet_netdev; in virtnet_probe()
6806 dev->stat_ops = &virtnet_stat_ops; in virtnet_probe()
6807 dev->features = NETIF_F_HIGHDMA; in virtnet_probe()
6809 dev->ethtool_ops = &virtnet_ethtool_ops; in virtnet_probe()
6810 SET_NETDEV_DEV(dev, &vdev->dev); in virtnet_probe()
6815 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; in virtnet_probe()
6817 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; in virtnet_probe()
6820 dev->hw_features |= NETIF_F_TSO in virtnet_probe()
6825 dev->hw_features |= NETIF_F_TSO; in virtnet_probe()
6827 dev->hw_features |= NETIF_F_TSO6; in virtnet_probe()
6829 dev->hw_features |= NETIF_F_TSO_ECN; in virtnet_probe()
6831 dev->hw_features |= NETIF_F_GSO_UDP_L4; in virtnet_probe()
6834 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; in virtnet_probe()
6835 dev->hw_enc_features = dev->hw_features; in virtnet_probe()
6837 if (dev->hw_features & NETIF_F_GSO_UDP_TUNNEL && in virtnet_probe()
6839 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in virtnet_probe()
6840 dev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in virtnet_probe()
6843 dev->features |= NETIF_F_GSO_ROBUST; in virtnet_probe()
6846 dev->features |= dev->hw_features; in virtnet_probe()
6857 dev->features |= NETIF_F_RXCSUM; in virtnet_probe()
6861 dev->features |= NETIF_F_GRO_HW; in virtnet_probe()
6863 dev->hw_features |= NETIF_F_GRO_HW; in virtnet_probe()
6865 dev->vlan_features = dev->features; in virtnet_probe()
6866 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in virtnet_probe()
6869 /* MTU range: 68 - 65535 */ in virtnet_probe()
6870 dev->min_mtu = MIN_MTU; in virtnet_probe()
6871 dev->max_mtu = MAX_MTU; in virtnet_probe()
6883 dev_info(&vdev->dev, "Assigned random MAC address %pM\n", in virtnet_probe()
6884 dev->dev_addr); in virtnet_probe()
6887 /* Set up our device-specific information */ in virtnet_probe()
6889 vi->dev = dev; in virtnet_probe()
6890 vi->vdev = vdev; in virtnet_probe()
6891 vdev->priv = vi; in virtnet_probe()
6893 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
6894 INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); in virtnet_probe()
6895 spin_lock_init(&vi->refill_lock); in virtnet_probe()
6898 vi->mergeable_rx_bufs = true; in virtnet_probe()
6899 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG; in virtnet_probe()
6903 vi->has_rss_hash_report = true; in virtnet_probe()
6906 vi->has_rss = true; in virtnet_probe()
6908 vi->rss_indir_table_size = in virtnet_probe()
6912 vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); in virtnet_probe()
6913 if (!vi->rss_hdr) { in virtnet_probe()
6914 err = -ENOMEM; in virtnet_probe()
6918 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6919 vi->rss_key_size = in virtnet_probe()
6921 if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) { in virtnet_probe()
6922 dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n", in virtnet_probe()
6923 vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE); in virtnet_probe()
6924 err = -EINVAL; in virtnet_probe()
6928 vi->rss_hash_types_supported = in virtnet_probe()
6930 vi->rss_hash_types_supported &= in virtnet_probe()
6935 dev->hw_features |= NETIF_F_RXHASH; in virtnet_probe()
6936 dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops; in virtnet_probe()
6941 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash_tunnel); in virtnet_probe()
6942 else if (vi->has_rss_hash_report) in virtnet_probe()
6943 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); in virtnet_probe()
6946 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
6948 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
6951 vi->rx_tnl_csum = true; in virtnet_probe()
6953 vi->rx_tnl = true; in virtnet_probe()
6955 vi->tx_tnl = true; in virtnet_probe()
6959 vi->any_header_sg = true; in virtnet_probe()
6962 vi->has_cvq = true; in virtnet_probe()
6964 mutex_init(&vi->cvq_lock); in virtnet_probe()
6970 if (mtu < dev->min_mtu) { in virtnet_probe()
6974 dev_err(&vdev->dev, in virtnet_probe()
6976 mtu, dev->min_mtu); in virtnet_probe()
6977 err = -EINVAL; in virtnet_probe()
6981 dev->mtu = mtu; in virtnet_probe()
6982 dev->max_mtu = mtu; in virtnet_probe()
6987 if (vi->any_header_sg) in virtnet_probe()
6988 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
6992 vi->curr_queue_pairs = max_queue_pairs; in virtnet_probe()
6994 vi->curr_queue_pairs = num_online_cpus(); in virtnet_probe()
6995 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
7002 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_probe()
7003 vi->intr_coal_rx.max_usecs = 0; in virtnet_probe()
7004 vi->intr_coal_tx.max_usecs = 0; in virtnet_probe()
7005 vi->intr_coal_rx.max_packets = 0; in virtnet_probe()
7010 if (vi->sq[0].napi.weight) in virtnet_probe()
7011 vi->intr_coal_tx.max_packets = 1; in virtnet_probe()
7013 vi->intr_coal_tx.max_packets = 0; in virtnet_probe()
7016 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_probe()
7018 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_probe()
7019 if (vi->sq[i].napi.weight) in virtnet_probe()
7020 vi->sq[i].intr_coal.max_packets = 1; in virtnet_probe()
7028 if (vi->mergeable_rx_bufs) in virtnet_probe()
7029 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; in virtnet_probe()
7031 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
7032 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
7037 vi->failover = net_failover_create(vi->dev); in virtnet_probe()
7038 if (IS_ERR(vi->failover)) { in virtnet_probe()
7039 err = PTR_ERR(vi->failover); in virtnet_probe()
7044 if (vi->has_rss || vi->has_rss_hash_report) in virtnet_probe()
7060 virtio_config_driver_disable(vi->vdev); in virtnet_probe()
7064 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
7066 dev_warn(&vdev->dev, "RSS disabled because committing failed.\n"); in virtnet_probe()
7067 dev->hw_features &= ~NETIF_F_RXHASH; in virtnet_probe()
7068 vi->has_rss_hash_report = false; in virtnet_probe()
7069 vi->has_rss = false; in virtnet_probe()
7073 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_probe()
7080 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { in virtnet_probe()
7083 sg_init_one(&sg, dev->dev_addr, dev->addr_len); in virtnet_probe()
7088 err = -EINVAL; in virtnet_probe()
7093 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) { in virtnet_probe()
7101 err = -ENOMEM; in virtnet_probe()
7112 err = -EINVAL; in virtnet_probe()
7116 v = stats_cap->supported_stats_types[0]; in virtnet_probe()
7117 vi->device_stats_cap = le64_to_cpu(v); in virtnet_probe()
7123 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
7124 virtio_config_changed(vi->vdev); in virtnet_probe()
7126 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
7135 if (virtio_has_feature(vi->vdev, fbit)) in virtnet_probe()
7136 set_bit(guest_offloads[i], &vi->guest_offloads); in virtnet_probe()
7138 vi->guest_offloads_capable = vi->guest_offloads; in virtnet_probe()
7149 dev->name, max_queue_pairs); in virtnet_probe()
7156 net_failover_destroy(vi->failover); in virtnet_probe()
7159 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
7171 virtio_reset_device(vi->vdev); in remove_vq_common()
7180 for (i = 0; i < vi->max_queue_pairs; i++) in remove_vq_common()
7181 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in remove_vq_common()
7192 struct virtnet_info *vi = vdev->priv; in virtnet_remove()
7197 flush_work(&vi->config_work); in virtnet_remove()
7199 flush_work(&vi->rx_mode_work); in virtnet_remove()
7203 unregister_netdev(vi->dev); in virtnet_remove()
7205 net_failover_destroy(vi->failover); in virtnet_remove()
7209 free_netdev(vi->dev); in virtnet_remove()
7214 struct virtnet_info *vi = vdev->priv; in virtnet_freeze()
7225 struct virtnet_info *vi = vdev->priv; in virtnet_restore()
7231 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()
7291 .freeze = virtnet_freeze,