Lines Matching +full:num +full:- +full:macs

1 // SPDX-License-Identifier: GPL-2.0-or-later
50 * at once, the weight is chosen so that the EWMA will be insensitive to short-
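A minimal sketch of the averaging described above, assuming the DECLARE_EWMA() helper from include/linux/average.h and an illustrative weight of 1/64 (the wrapper names below are hypothetical): each sample moves the average by only 1/64th of its distance from the current value, so a short burst of oddly sized packets barely shifts the buffer-size estimate.

	#include <linux/average.h>

	DECLARE_EWMA(pkt_len, 0, 64)	/* precision 0, weight reciprocal 64 */

	static struct ewma_pkt_len avg_pkt_len;	/* zero-initialized, same as ewma_pkt_len_init() */

	/* Record one received packet length: roughly avg += (len - avg) / 64
	 * (the very first sample just seeds the average).
	 */
	static void rx_record_len(unsigned int len)
	{
		ewma_pkt_len_add(&avg_pkt_len, len);
	}

	/* Smoothed estimate used when sizing the next batch of RX buffers. */
	static unsigned int rx_next_buf_len(void)
	{
		return ewma_pkt_len_read(&avg_pkt_len);
	}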
113 #define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
114 #define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
159 {#name, offsetof(struct virtio_net_stats_cvq, name), -1}
162 {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
165 {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
517 static void virtnet_xsk_completed(struct send_queue *sq, int num);
529 rss->indirection_table = NULL; in rss_indirection_table_alloc()
533 rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL); in rss_indirection_table_alloc()
534 if (!rss->indirection_table) in rss_indirection_table_alloc()
535 return -ENOMEM; in rss_indirection_table_alloc()
542 kfree(rss->indirection_table); in rss_indirection_table_free()
564 static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data, in virtnet_add_outbuf() argument
567 return virtqueue_add_outbuf(sq->vq, sq->sg, num, in virtnet_add_outbuf()
591 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { in __free_old_xmit()
597 stats->napi_packets++; in __free_old_xmit()
598 stats->napi_bytes += skb->len; in __free_old_xmit()
605 stats->packets++; in __free_old_xmit()
606 stats->bytes += skb->len; in __free_old_xmit()
613 stats->packets++; in __free_old_xmit()
614 stats->bytes += xdp_get_frame_len(frame); in __free_old_xmit()
619 stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr); in __free_old_xmit()
620 stats->xsk++; in __free_old_xmit()
624 netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes); in __free_old_xmit()
634 if (stats->xsk) in virtnet_free_old_xmit()
635 virtnet_xsk_completed(sq, stats->xsk); in virtnet_free_old_xmit()
643 return (vq->index - 1) / 2; in vq2txq()
653 return vq->index / 2; in vq2rxq()
663 if (qid == vi->max_queue_pairs * 2) in vq_type()
675 return (struct virtio_net_common_hdr *)skb->cb; in skb_vnet_common_hdr()
686 /* Find end of list, sew whole thing into vi->rq.pages. */ in give_pages()
687 for (end = page; end->private; end = (struct page *)end->private); in give_pages()
688 end->private = (unsigned long)rq->pages; in give_pages()
689 rq->pages = page; in give_pages()
694 struct page *p = rq->pages; in get_a_page()
697 rq->pages = (struct page *)p->private; in get_a_page()
699 p->private = 0; in get_a_page()
708 if (vi->mergeable_rx_bufs) in virtnet_rq_free_buf()
710 else if (vi->big_packets) in virtnet_rq_free_buf()
718 spin_lock_bh(&vi->refill_lock); in enable_delayed_refill()
719 vi->refill_enabled = true; in enable_delayed_refill()
720 spin_unlock_bh(&vi->refill_lock); in enable_delayed_refill()
725 spin_lock_bh(&vi->refill_lock); in disable_delayed_refill()
726 vi->refill_enabled = false; in disable_delayed_refill()
727 spin_unlock_bh(&vi->refill_lock); in disable_delayed_refill()
733 vi->rx_mode_work_enabled = true; in enable_rx_mode_work()
740 vi->rx_mode_work_enabled = false; in disable_rx_mode_work()
773 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done()
774 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
779 if (napi->weight) in skb_xmit_done()
783 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
800 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); in mergeable_ctx_to_truesize()
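The mask above recovers the truesize from the low MRG_CTX_HEADER_SHIFT bits of the per-buffer context value; a sketch of the matching pack/unpack pair, consistent with that mask (treat these helper definitions as illustrative, not the driver's authoritative ones):

	/* Pack truesize into the low MRG_CTX_HEADER_SHIFT bits and headroom
	 * into the bits above, so both travel in one pointer-sized context.
	 */
	static void *mergeable_len_to_ctx(unsigned int truesize,
					  unsigned int headroom)
	{
		return (void *)(unsigned long)(truesize |
					       (headroom << MRG_CTX_HEADER_SHIFT));
	}

	static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
	{
		return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
	}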
836 hdr_len = vi->hdr_len; in page_to_skb()
837 if (vi->mergeable_rx_bufs) in page_to_skb()
842 buf = p - headroom; in page_to_skb()
843 len -= hdr_len; in page_to_skb()
846 tailroom = truesize - headroom - hdr_padded_len - len; in page_to_skb()
851 skb = virtnet_build_skb(buf, truesize, p - buf, len); in page_to_skb()
855 page = (struct page *)page->private; in page_to_skb()
862 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); in page_to_skb()
866 /* Copy the whole frame if it fits skb->head, otherwise in page_to_skb()
875 len -= copy; in page_to_skb()
878 if (vi->mergeable_rx_bufs) { in page_to_skb()
893 net_dbg_ratelimited("%s: too much data\n", skb->dev->name); in page_to_skb()
899 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); in page_to_skb()
900 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, in page_to_skb()
902 len -= frag_size; in page_to_skb()
903 page = (struct page *)page->private; in page_to_skb()
921 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_unmap()
927 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_unmap()
933 --dma->ref; in virtnet_rq_unmap()
935 if (dma->need_sync && len) { in virtnet_rq_unmap()
936 offset = buf - (head + sizeof(*dma)); in virtnet_rq_unmap()
938 virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, in virtnet_rq_unmap()
943 if (dma->ref) in virtnet_rq_unmap()
946 virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len, in virtnet_rq_unmap()
953 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_get_buf()
956 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_get_buf()
958 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx); in virtnet_rq_get_buf()
967 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_init_one_sg()
973 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_init_one_sg()
975 head = page_address(rq->alloc_frag.page); in virtnet_rq_init_one_sg()
977 offset = buf - head; in virtnet_rq_init_one_sg()
981 addr = dma->addr - sizeof(*dma) + offset; in virtnet_rq_init_one_sg()
983 sg_init_table(rq->sg, 1); in virtnet_rq_init_one_sg()
984 sg_fill_dma(rq->sg, addr, len); in virtnet_rq_init_one_sg()
989 struct page_frag *alloc_frag = &rq->alloc_frag; in virtnet_rq_alloc()
990 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rq_alloc()
995 BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs); in virtnet_rq_alloc()
997 head = page_address(alloc_frag->page); in virtnet_rq_alloc()
1002 if (!alloc_frag->offset) { in virtnet_rq_alloc()
1003 if (rq->last_dma) { in virtnet_rq_alloc()
1008 virtnet_rq_unmap(rq, rq->last_dma, 0); in virtnet_rq_alloc()
1009 rq->last_dma = NULL; in virtnet_rq_alloc()
1012 dma->len = alloc_frag->size - sizeof(*dma); in virtnet_rq_alloc()
1014 addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1, in virtnet_rq_alloc()
1015 dma->len, DMA_FROM_DEVICE, 0); in virtnet_rq_alloc()
1016 if (virtqueue_dma_mapping_error(rq->vq, addr)) in virtnet_rq_alloc()
1019 dma->addr = addr; in virtnet_rq_alloc()
1020 dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr); in virtnet_rq_alloc()
1026 get_page(alloc_frag->page); in virtnet_rq_alloc()
1027 dma->ref = 1; in virtnet_rq_alloc()
1028 alloc_frag->offset = sizeof(*dma); in virtnet_rq_alloc()
1030 rq->last_dma = dma; in virtnet_rq_alloc()
1033 ++dma->ref; in virtnet_rq_alloc()
1035 buf = head + alloc_frag->offset; in virtnet_rq_alloc()
1037 get_page(alloc_frag->page); in virtnet_rq_alloc()
1038 alloc_frag->offset += size; in virtnet_rq_alloc()
1045 struct virtnet_info *vi = vq->vdev->priv; in virtnet_rq_unmap_free_buf()
1049 rq = &vi->rq[i]; in virtnet_rq_unmap_free_buf()
1051 if (rq->xsk_pool) { in virtnet_rq_unmap_free_buf()
1056 if (!vi->big_packets || vi->mergeable_rx_bufs) in virtnet_rq_unmap_free_buf()
1075 u64_stats_update_begin(&sq->stats.syncp); in free_old_xmit()
1076 u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes); in free_old_xmit()
1077 u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets); in free_old_xmit()
1078 u64_stats_update_end(&sq->stats.syncp); in free_old_xmit()
1083 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) in is_xdp_raw_buffer_queue()
1085 else if (q < vi->curr_queue_pairs) in is_xdp_raw_buffer_queue()
1095 bool use_napi = sq->napi.weight; in check_sq_full_and_disable()
1098 qnum = sq - vi->sq; in check_sq_full_and_disable()
1106 * the stack to do a non-trivial amount of useless work. in check_sq_full_and_disable()
1110 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { in check_sq_full_and_disable()
1114 u64_stats_update_begin(&sq->stats.syncp); in check_sq_full_and_disable()
1115 u64_stats_inc(&sq->stats.stop); in check_sq_full_and_disable()
1116 u64_stats_update_end(&sq->stats.syncp); in check_sq_full_and_disable()
1118 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) in check_sq_full_and_disable()
1119 virtqueue_napi_schedule(&sq->napi, sq->vq); in check_sq_full_and_disable()
1120 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { in check_sq_full_and_disable()
1123 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { in check_sq_full_and_disable()
1125 u64_stats_update_begin(&sq->stats.syncp); in check_sq_full_and_disable()
1126 u64_stats_inc(&sq->stats.wake); in check_sq_full_and_disable()
1127 u64_stats_update_end(&sq->stats.syncp); in check_sq_full_and_disable()
1128 virtqueue_disable_cb(sq->vq); in check_sq_full_and_disable()
1142 bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len; in buf_to_xdp()
1146 vi->dev->name, len, bufsize); in buf_to_xdp()
1147 DEV_STATS_INC(vi->dev, rx_length_errors); in buf_to_xdp()
1161 unsigned int metasize = xdp->data - xdp->data_meta; in xsk_construct_skb()
1165 size = xdp->data_end - xdp->data_hard_start; in xsk_construct_skb()
1166 skb = napi_alloc_skb(&rq->napi, size); in xsk_construct_skb()
1172 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); in xsk_construct_skb()
1174 size = xdp->data_end - xdp->data_meta; in xsk_construct_skb()
1175 memcpy(__skb_put(skb, size), xdp->data_meta, size); in xsk_construct_skb()
1197 prog = rcu_dereference(rq->xdp_prog); in virtnet_receive_xsk_small()
1213 u64_stats_inc(&stats->drops); in virtnet_receive_xsk_small()
1226 while (num_buf-- > 1) { in xsk_drop_follow_bufs()
1227 xdp = virtqueue_get_buf(rq->vq, &len); in xsk_drop_follow_bufs()
1230 dev->name, num_buf); in xsk_drop_follow_bufs()
1234 u64_stats_add(&stats->bytes, len); in xsk_drop_follow_bufs()
1254 while (--num_buf) { in xsk_append_merge_buffer()
1255 buf = virtqueue_get_buf(rq->vq, &len); in xsk_append_merge_buffer()
1258 vi->dev->name, num_buf, in xsk_append_merge_buffer()
1259 virtio16_to_cpu(vi->vdev, in xsk_append_merge_buffer()
1260 hdr->num_buffers)); in xsk_append_merge_buffer()
1261 DEV_STATS_INC(vi->dev, rx_length_errors); in xsk_append_merge_buffer()
1262 return -EINVAL; in xsk_append_merge_buffer()
1265 u64_stats_add(&stats->bytes, len); in xsk_append_merge_buffer()
1277 memcpy(buf, xdp->data - vi->hdr_len, len); in xsk_append_merge_buffer()
1296 xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats); in xsk_append_merge_buffer()
1297 return -EINVAL; in xsk_append_merge_buffer()
1310 hdr = xdp->data - vi->hdr_len; in virtnet_receive_xsk_merge()
1311 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in virtnet_receive_xsk_merge()
1315 prog = rcu_dereference(rq->xdp_prog); in virtnet_receive_xsk_merge()
1347 u64_stats_inc(&stats->drops); in virtnet_receive_xsk_merge()
1356 struct net_device *dev = vi->dev; in virtnet_receive_xsk_buf()
1361 len -= vi->hdr_len; in virtnet_receive_xsk_buf()
1363 u64_stats_add(&stats->bytes, len); in virtnet_receive_xsk_buf()
1370 pr_debug("%s: short packet %i\n", dev->name, len); in virtnet_receive_xsk_buf()
1376 flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags; in virtnet_receive_xsk_buf()
1378 if (!vi->mergeable_rx_bufs) in virtnet_receive_xsk_buf()
1394 int num; in virtnet_add_recvbuf_xsk() local
1396 xsk_buffs = rq->xsk_buffs; in virtnet_add_recvbuf_xsk()
1398 num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free); in virtnet_add_recvbuf_xsk()
1399 if (!num) in virtnet_add_recvbuf_xsk()
1400 return -ENOMEM; in virtnet_add_recvbuf_xsk()
1402 len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len; in virtnet_add_recvbuf_xsk()
1404 for (i = 0; i < num; ++i) { in virtnet_add_recvbuf_xsk()
1406 * We assume XDP_PACKET_HEADROOM is larger than hdr->len. in virtnet_add_recvbuf_xsk()
1409 addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len; in virtnet_add_recvbuf_xsk()
1411 sg_init_table(rq->sg, 1); in virtnet_add_recvbuf_xsk()
1412 sg_fill_dma(rq->sg, addr, len); in virtnet_add_recvbuf_xsk()
1414 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, in virtnet_add_recvbuf_xsk()
1420 return num; in virtnet_add_recvbuf_xsk()
1423 for (; i < num; ++i) in virtnet_add_recvbuf_xsk()
1445 vi = sq->vq->vdev->priv; in virtnet_xsk_xmit_one()
1447 addr = xsk_buff_raw_get_dma(pool, desc->addr); in virtnet_xsk_xmit_one()
1448 xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len); in virtnet_xsk_xmit_one()
1450 sg_init_table(sq->sg, 2); in virtnet_xsk_xmit_one()
1451 sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len); in virtnet_xsk_xmit_one()
1452 sg_fill_dma(sq->sg + 1, addr, desc->len); in virtnet_xsk_xmit_one()
1454 return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2, in virtnet_xsk_xmit_one()
1455 virtnet_xsk_to_ptr(desc->len), in virtnet_xsk_xmit_one()
1464 struct xdp_desc *descs = pool->tx_descs; in virtnet_xsk_xmit_batch()
1469 budget = min_t(u32, budget, sq->vq->num_free); in virtnet_xsk_xmit_batch()
1478 xsk_tx_completed(sq->xsk_pool, nb_pkts - i); in virtnet_xsk_xmit_batch()
1485 if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xsk_xmit_batch()
1494 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_xsk_xmit()
1496 struct net_device *dev = vi->dev; in virtnet_xsk_xmit()
1503 __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats); in virtnet_xsk_xmit()
1506 xsk_tx_completed(sq->xsk_pool, stats.xsk); in virtnet_xsk_xmit()
1510 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xsk_xmit()
1511 check_sq_full_and_disable(vi, vi->dev, sq); in virtnet_xsk_xmit()
1516 txq = netdev_get_tx_queue(vi->dev, sq - vi->sq); in virtnet_xsk_xmit()
1520 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xsk_xmit()
1521 u64_stats_add(&sq->stats.packets, stats.packets); in virtnet_xsk_xmit()
1522 u64_stats_add(&sq->stats.bytes, stats.bytes); in virtnet_xsk_xmit()
1523 u64_stats_add(&sq->stats.kicks, kicks); in virtnet_xsk_xmit()
1524 u64_stats_add(&sq->stats.xdp_tx, sent); in virtnet_xsk_xmit()
1525 u64_stats_update_end(&sq->stats.syncp); in virtnet_xsk_xmit()
1535 if (napi_if_scheduled_mark_missed(&sq->napi)) in xsk_wakeup()
1539 virtqueue_napi_schedule(&sq->napi, sq->vq); in xsk_wakeup()
1549 return -ENETDOWN; in virtnet_xsk_wakeup()
1551 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_wakeup()
1552 return -EINVAL; in virtnet_xsk_wakeup()
1554 sq = &vi->sq[qid]; in virtnet_xsk_wakeup()
1560 static void virtnet_xsk_completed(struct send_queue *sq, int num) in virtnet_xsk_completed() argument
1562 xsk_tx_completed(sq->xsk_pool, num); in virtnet_xsk_completed()
1580 if (unlikely(xdpf->headroom < vi->hdr_len)) in __virtnet_xdp_xmit_one()
1581 return -EOVERFLOW; in __virtnet_xdp_xmit_one()
1585 nr_frags = shinfo->nr_frags; in __virtnet_xdp_xmit_one()
1591 * xdp_return_frame(), which will involve xdpf->data and in __virtnet_xdp_xmit_one()
1592 * xdpf->headroom. Therefore, we need to update the value of in __virtnet_xdp_xmit_one()
1595 xdpf->headroom -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1596 xdpf->data -= vi->hdr_len; in __virtnet_xdp_xmit_one()
1598 hdr = xdpf->data; in __virtnet_xdp_xmit_one()
1599 memset(hdr, 0, vi->hdr_len); in __virtnet_xdp_xmit_one()
1600 xdpf->len += vi->hdr_len; in __virtnet_xdp_xmit_one()
1602 sg_init_table(sq->sg, nr_frags + 1); in __virtnet_xdp_xmit_one()
1603 sg_set_buf(sq->sg, xdpf->data, xdpf->len); in __virtnet_xdp_xmit_one()
1605 skb_frag_t *frag = &shinfo->frags[i]; in __virtnet_xdp_xmit_one()
1607 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag), in __virtnet_xdp_xmit_one()
1613 return -ENOSPC; /* Caller handle free/refcnt */ in __virtnet_xdp_xmit_one()
1618 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1632 if (v->curr_queue_pairs > nr_cpu_ids) { \
1633 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
1635 txq = netdev_get_tx_queue(v->dev, qp); \
1638 qp = cpu % v->curr_queue_pairs; \
1639 txq = netdev_get_tx_queue(v->dev, qp); \
1642 v->sq + qp; \
1649 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
1650 if (v->curr_queue_pairs > nr_cpu_ids) \
1661 struct receive_queue *rq = vi->rq; in virtnet_xdp_xmit()
1672 xdp_prog = rcu_access_pointer(rq->xdp_prog); in virtnet_xdp_xmit()
1674 return -ENXIO; in virtnet_xdp_xmit()
1679 ret = -EINVAL; in virtnet_xdp_xmit()
1684 virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), in virtnet_xdp_xmit()
1696 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) in virtnet_xdp_xmit()
1700 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) in virtnet_xdp_xmit()
1704 u64_stats_update_begin(&sq->stats.syncp); in virtnet_xdp_xmit()
1705 u64_stats_add(&sq->stats.bytes, stats.bytes); in virtnet_xdp_xmit()
1706 u64_stats_add(&sq->stats.packets, stats.packets); in virtnet_xdp_xmit()
1707 u64_stats_add(&sq->stats.xdp_tx, n); in virtnet_xdp_xmit()
1708 u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit); in virtnet_xdp_xmit()
1709 u64_stats_add(&sq->stats.kicks, kicks); in virtnet_xdp_xmit()
1710 u64_stats_update_end(&sq->stats.syncp); in virtnet_xdp_xmit()
1724 for (i = 0; i < shinfo->nr_frags; i++) { in put_xdp_frags()
1725 xdp_page = skb_frag_page(&shinfo->frags[i]); in put_xdp_frags()
1741 u64_stats_inc(&stats->xdp_packets); in virtnet_xdp_handler()
1748 u64_stats_inc(&stats->xdp_tx); in virtnet_xdp_handler()
1766 u64_stats_inc(&stats->xdp_redirects); in virtnet_xdp_handler()
1787 return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0; in virtnet_get_headroom()
1797 * with large buffers with sufficient headroom - so it should affect
1824 while (--*num_buf) { in xdp_linearize_page()
1834 off = buf - page_address(p); in xdp_linearize_page()
1851 *len = page_off - XDP_PACKET_HEADROOM; in xdp_linearize_page()
1869 headroom = vi->hdr_len + header_offset; in receive_small_build_skb()
1878 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); in receive_small_build_skb()
1894 unsigned int headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1904 if (unlikely(hdr->hdr.gso_type)) in receive_small_xdp()
1908 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) in receive_small_xdp()
1915 int offset = buf - page_address(page) + header_offset; in receive_small_xdp()
1916 unsigned int tlen = len + vi->hdr_len; in receive_small_xdp()
1921 headroom = vi->hdr_len + header_offset; in receive_small_xdp()
1935 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq); in receive_small_xdp()
1936 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len, in receive_small_xdp()
1944 len = xdp.data_end - xdp.data; in receive_small_xdp()
1945 metasize = xdp.data - xdp.data_meta; in receive_small_xdp()
1956 skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len); in receive_small_xdp()
1966 u64_stats_inc(&stats->xdp_drops); in receive_small_xdp()
1968 u64_stats_inc(&stats->drops); in receive_small_xdp()
1986 /* We passed the address of virtnet header to virtio-core, in receive_small()
1989 buf -= VIRTNET_RX_PAD + xdp_headroom; in receive_small()
1991 len -= vi->hdr_len; in receive_small()
1992 u64_stats_add(&stats->bytes, len); in receive_small()
1996 dev->name, len, GOOD_PACKET_LEN); in receive_small()
2001 if (unlikely(vi->xdp_enabled)) { in receive_small()
2005 xdp_prog = rcu_dereference(rq->xdp_prog); in receive_small()
2021 u64_stats_inc(&stats->drops); in receive_small()
2037 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_big()
2044 u64_stats_inc(&stats->drops); in receive_big()
2057 while (num_buf-- > 1) { in mergeable_buf_free()
2061 dev->name, num_buf); in mergeable_buf_free()
2065 u64_stats_add(&stats->bytes, len); in mergeable_buf_free()
2073 * virtio-net there are 2 points that do not match its requirements:
2076 * like eth_type_trans() (which virtio-net does in receive_buf()).
2089 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { in build_skb_from_xdp_buff()
2095 nr_frags = sinfo->nr_frags; in build_skb_from_xdp_buff()
2097 skb = build_skb(xdp->data_hard_start, xdp->frame_sz); in build_skb_from_xdp_buff()
2101 headroom = xdp->data - xdp->data_hard_start; in build_skb_from_xdp_buff()
2102 data_len = xdp->data_end - xdp->data; in build_skb_from_xdp_buff()
2106 metasize = xdp->data - xdp->data_meta; in build_skb_from_xdp_buff()
2113 sinfo->xdp_frags_size, in build_skb_from_xdp_buff()
2142 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); in virtnet_build_xdp_buff_mrg()
2143 xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM, in virtnet_build_xdp_buff_mrg()
2144 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); in virtnet_build_xdp_buff_mrg()
2150 /* If we want to build multi-buffer xdp, we need in virtnet_build_xdp_buff_mrg()
2158 shinfo->nr_frags = 0; in virtnet_build_xdp_buff_mrg()
2159 shinfo->xdp_frags_size = 0; in virtnet_build_xdp_buff_mrg()
2163 return -EINVAL; in virtnet_build_xdp_buff_mrg()
2165 while (--*num_buf > 0) { in virtnet_build_xdp_buff_mrg()
2169 dev->name, *num_buf, in virtnet_build_xdp_buff_mrg()
2170 virtio16_to_cpu(vi->vdev, hdr->num_buffers)); in virtnet_build_xdp_buff_mrg()
2175 u64_stats_add(&stats->bytes, len); in virtnet_build_xdp_buff_mrg()
2177 offset = buf - page_address(page); in virtnet_build_xdp_buff_mrg()
2186 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) { in virtnet_build_xdp_buff_mrg()
2189 dev->name, len, (unsigned long)(truesize - room)); in virtnet_build_xdp_buff_mrg()
2194 frag = &shinfo->frags[shinfo->nr_frags++]; in virtnet_build_xdp_buff_mrg()
2199 shinfo->xdp_frags_size += len; in virtnet_build_xdp_buff_mrg()
2207 return -EINVAL; in virtnet_build_xdp_buff_mrg()
2227 * in-flight packets from before XDP was enabled reach in mergeable_xdp_get_buf()
2230 if (unlikely(hdr->hdr.gso_type)) in mergeable_xdp_get_buf()
2234 if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) in mergeable_xdp_get_buf()
2245 (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) { in mergeable_xdp_get_buf()
2257 if (!xdp_prog->aux->xdp_has_frags) { in mergeable_xdp_get_buf()
2299 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable_xdp()
2301 int offset = buf - page_address(page); in receive_mergeable_xdp()
2343 u64_stats_inc(&stats->xdp_drops); in receive_mergeable_xdp()
2344 u64_stats_inc(&stats->drops); in receive_mergeable_xdp()
2356 num_skb_frags = skb_shinfo(curr_skb)->nr_frags; in virtnet_skb_append_frag()
2364 skb_shinfo(curr_skb)->frag_list = nskb; in virtnet_skb_append_frag()
2366 curr_skb->next = nskb; in virtnet_skb_append_frag()
2368 head_skb->truesize += nskb->truesize; in virtnet_skb_append_frag()
2373 head_skb->data_len += len; in virtnet_skb_append_frag()
2374 head_skb->len += len; in virtnet_skb_append_frag()
2375 head_skb->truesize += truesize; in virtnet_skb_append_frag()
2378 offset = buf - page_address(page); in virtnet_skb_append_frag()
2381 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, in virtnet_skb_append_frag()
2401 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
2403 int offset = buf - page_address(page); in receive_mergeable()
2411 u64_stats_add(&stats->bytes, len - vi->hdr_len); in receive_mergeable()
2413 if (unlikely(len > truesize - room)) { in receive_mergeable()
2415 dev->name, len, (unsigned long)(truesize - room)); in receive_mergeable()
2420 if (unlikely(vi->xdp_enabled)) { in receive_mergeable()
2424 xdp_prog = rcu_dereference(rq->xdp_prog); in receive_mergeable()
2439 while (--num_buf) { in receive_mergeable()
2443 dev->name, num_buf, in receive_mergeable()
2444 virtio16_to_cpu(vi->vdev, in receive_mergeable()
2445 hdr->num_buffers)); in receive_mergeable()
2450 u64_stats_add(&stats->bytes, len); in receive_mergeable()
2457 if (unlikely(len > truesize - room)) { in receive_mergeable()
2459 dev->name, len, (unsigned long)(truesize - room)); in receive_mergeable()
2470 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); in receive_mergeable()
2478 u64_stats_inc(&stats->drops); in receive_mergeable()
2491 switch (__le16_to_cpu(hdr_hash->hash_report)) { in virtio_skb_set_hash()
2509 skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type); in virtio_skb_set_hash()
2516 struct net_device *dev = vi->dev; in virtnet_receive_done()
2519 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) in virtnet_receive_done()
2520 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); in virtnet_receive_done()
2523 skb->ip_summed = CHECKSUM_UNNECESSARY; in virtnet_receive_done()
2525 if (virtio_net_hdr_to_skb(skb, &hdr->hdr, in virtnet_receive_done()
2526 virtio_is_little_endian(vi->vdev))) { in virtnet_receive_done()
2528 dev->name, hdr->hdr.gso_type, in virtnet_receive_done()
2529 hdr->hdr.gso_size); in virtnet_receive_done()
2533 skb_record_rx_queue(skb, vq2rxq(rq->vq)); in virtnet_receive_done()
2534 skb->protocol = eth_type_trans(skb, dev); in virtnet_receive_done()
2536 ntohs(skb->protocol), skb->len, skb->pkt_type); in virtnet_receive_done()
2538 napi_gro_receive(&rq->napi, skb); in virtnet_receive_done()
2551 struct net_device *dev = vi->dev; in receive_buf()
2555 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
2556 pr_debug("%s: short packet %i\n", dev->name, len); in receive_buf()
2569 flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; in receive_buf()
2571 if (vi->mergeable_rx_bufs) in receive_buf()
2574 else if (vi->big_packets) in receive_buf()
2587 * not need to use mergeable_len_to_ctx here - it is enough
2596 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; in add_recvbuf_small()
2602 if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp))) in add_recvbuf_small()
2603 return -ENOMEM; in add_recvbuf_small()
2607 return -ENOMEM; in add_recvbuf_small()
2611 virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN); in add_recvbuf_small()
2613 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp); in add_recvbuf_small()
2629 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2); in add_recvbuf_big()
2631 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */ in add_recvbuf_big()
2632 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) { in add_recvbuf_big()
2637 return -ENOMEM; in add_recvbuf_big()
2639 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); in add_recvbuf_big()
2642 first->private = (unsigned long)list; in add_recvbuf_big()
2649 return -ENOMEM; in add_recvbuf_big()
2653 /* rq->sg[0], rq->sg[1] share the same page */ in add_recvbuf_big()
2654 /* a separate rq->sg[0] for the header - required in case !any_header_sg */ in add_recvbuf_big()
2655 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
2657 /* rq->sg[1] for data packet, from offset */ in add_recvbuf_big()
2659 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); in add_recvbuf_big()
2662 first->private = (unsigned long)list; in add_recvbuf_big()
2663 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2, in add_recvbuf_big()
2675 struct virtnet_info *vi = rq->vq->vdev->priv; in get_mergeable_buf_len()
2676 const size_t hdr_len = vi->hdr_len; in get_mergeable_buf_len()
2680 return PAGE_SIZE - room; in get_mergeable_buf_len()
2683 rq->min_buf_len, PAGE_SIZE - hdr_len); in get_mergeable_buf_len()
2691 struct page_frag *alloc_frag = &rq->alloc_frag; in add_recvbuf_mergeable()
2704 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); in add_recvbuf_mergeable()
2707 return -ENOMEM; in add_recvbuf_mergeable()
2709 if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size) in add_recvbuf_mergeable()
2710 len -= sizeof(struct virtnet_rq_dma); in add_recvbuf_mergeable()
2714 return -ENOMEM; in add_recvbuf_mergeable()
2717 hole = alloc_frag->size - alloc_frag->offset; in add_recvbuf_mergeable()
2727 alloc_frag->offset += hole; in add_recvbuf_mergeable()
2733 err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp); in add_recvbuf_mergeable()
2754 if (rq->xsk_pool) { in try_fill_recv()
2755 err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp); in try_fill_recv()
2760 if (vi->mergeable_rx_bufs) in try_fill_recv()
2762 else if (vi->big_packets) in try_fill_recv()
2769 } while (rq->vq->num_free); in try_fill_recv()
2772 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { in try_fill_recv()
2775 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); in try_fill_recv()
2776 u64_stats_inc(&rq->stats.kicks); in try_fill_recv()
2777 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); in try_fill_recv()
2780 return err != -ENOMEM; in try_fill_recv()
2785 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done()
2786 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
2788 rq->calls++; in skb_recv_done()
2789 virtqueue_napi_schedule(&rq->napi, rvq); in skb_recv_done()
2809 if (!napi->weight) in virtnet_napi_tx_enable()
2815 if (!vi->affinity_hint_set) { in virtnet_napi_tx_enable()
2816 napi->weight = 0; in virtnet_napi_tx_enable()
2825 if (napi->weight) in virtnet_napi_tx_disable()
2836 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
2837 struct receive_queue *rq = &vi->rq[i]; in refill_work()
2839 napi_disable(&rq->napi); in refill_work()
2841 virtnet_napi_enable(rq->vq, &rq->napi); in refill_work()
2847 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
2862 buf = virtqueue_get_buf(rq->vq, &len); in virtnet_receive_xsk_bufs()
2883 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_receive_packets()
2892 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { in virtnet_receive_packets()
2904 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive()
2908 if (rq->xsk_pool) in virtnet_receive()
2913 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { in virtnet_receive()
2915 spin_lock(&vi->refill_lock); in virtnet_receive()
2916 if (vi->refill_enabled) in virtnet_receive()
2917 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
2918 spin_unlock(&vi->refill_lock); in virtnet_receive()
2923 u64_stats_update_begin(&rq->stats.syncp); in virtnet_receive()
2928 item = (u64_stats_t *)((u8 *)&rq->stats + offset); in virtnet_receive()
2933 u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets)); in virtnet_receive()
2934 u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes)); in virtnet_receive()
2936 u64_stats_update_end(&rq->stats.syncp); in virtnet_receive()
2943 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll_cleantx()
2944 unsigned int index = vq2rxq(rq->vq); in virtnet_poll_cleantx()
2945 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx()
2946 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_cleantx()
2948 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
2952 if (sq->reset) { in virtnet_poll_cleantx()
2958 virtqueue_disable_cb(sq->vq); in virtnet_poll_cleantx()
2960 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in virtnet_poll_cleantx()
2962 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { in virtnet_poll_cleantx()
2964 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll_cleantx()
2965 u64_stats_inc(&sq->stats.wake); in virtnet_poll_cleantx()
2966 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll_cleantx()
2979 if (!rq->packets_in_napi) in virtnet_rx_dim_update()
2985 dim_update_sample(rq->calls, in virtnet_rx_dim_update()
2986 u64_stats_read(&rq->stats.packets), in virtnet_rx_dim_update()
2987 u64_stats_read(&rq->stats.bytes), in virtnet_rx_dim_update()
2990 net_dim(&rq->dim, &cur_sample); in virtnet_rx_dim_update()
2991 rq->packets_in_napi = 0; in virtnet_rx_dim_update()
2998 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll()
3007 rq->packets_in_napi += received; in virtnet_poll()
3014 napi_complete = virtqueue_napi_complete(napi, rq->vq, received); in virtnet_poll()
3019 if (napi_complete && rq->dim_enabled) in virtnet_poll()
3025 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in virtnet_poll()
3026 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll()
3027 u64_stats_inc(&sq->stats.kicks); in virtnet_poll()
3028 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll()
3038 virtnet_napi_tx_disable(&vi->sq[qp_index].napi); in virtnet_disable_queue_pair()
3039 napi_disable(&vi->rq[qp_index].napi); in virtnet_disable_queue_pair()
3040 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_disable_queue_pair()
3045 struct net_device *dev = vi->dev; in virtnet_enable_queue_pair()
3048 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index, in virtnet_enable_queue_pair()
3049 vi->rq[qp_index].napi.napi_id); in virtnet_enable_queue_pair()
3053 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq, in virtnet_enable_queue_pair()
3058 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); in virtnet_enable_queue_pair()
3059 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); in virtnet_enable_queue_pair()
3064 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); in virtnet_enable_queue_pair()
3070 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_cancel_dim()
3080 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) in virtnet_update_settings()
3083 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); in virtnet_update_settings()
3086 vi->speed = speed; in virtnet_update_settings()
3088 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); in virtnet_update_settings()
3091 vi->duplex = duplex; in virtnet_update_settings()
3101 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
3102 if (i < vi->curr_queue_pairs) in virtnet_open()
3104 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
3105 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
3112 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_open()
3113 if (vi->status & VIRTIO_NET_S_LINK_UP) in virtnet_open()
3114 netif_carrier_on(vi->dev); in virtnet_open()
3115 virtio_config_driver_enable(vi->vdev); in virtnet_open()
3117 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_open()
3125 cancel_delayed_work_sync(&vi->refill); in virtnet_open()
3127 for (i--; i >= 0; i--) { in virtnet_open()
3129 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_open()
3138 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx()
3139 unsigned int index = vq2txq(sq->vq); in virtnet_poll_tx()
3150 txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_tx()
3152 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3154 if (sq->xsk_pool) in virtnet_poll_tx()
3155 xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget); in virtnet_poll_tx()
3159 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { in virtnet_poll_tx()
3161 u64_stats_update_begin(&sq->stats.syncp); in virtnet_poll_tx()
3162 u64_stats_inc(&sq->stats.wake); in virtnet_poll_tx()
3163 u64_stats_update_end(&sq->stats.syncp); in virtnet_poll_tx()
3173 opaque = virtqueue_enable_cb_prepare(sq->vq); in virtnet_poll_tx()
3178 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3183 if (unlikely(virtqueue_poll(sq->vq, opaque))) { in virtnet_poll_tx()
3186 virtqueue_disable_cb(sq->vq); in virtnet_poll_tx()
3199 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; in xmit_skb()
3200 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb()
3202 unsigned hdr_len = vi->hdr_len; in xmit_skb()
3205 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
3207 can_push = vi->any_header_sg && in xmit_skb()
3208 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && in xmit_skb()
3213 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); in xmit_skb()
3215 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr; in xmit_skb()
3217 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, in xmit_skb()
3218 virtio_is_little_endian(vi->vdev), false, in xmit_skb()
3220 return -EPROTO; in xmit_skb()
3222 if (vi->mergeable_rx_bufs) in xmit_skb()
3223 hdr->num_buffers = 0; in xmit_skb()
3225 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); in xmit_skb()
3228 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); in xmit_skb()
3234 sg_set_buf(sq->sg, hdr, hdr_len); in xmit_skb()
3235 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); in xmit_skb()
3249 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
3253 bool use_napi = sq->napi.weight; in start_xmit()
3259 virtqueue_disable_cb(sq->vq); in start_xmit()
3264 unlikely(!virtqueue_enable_cb_delayed(sq->vq))); in start_xmit()
3276 dev_warn(&dev->dev, in start_xmit()
3292 kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) : in start_xmit()
3295 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { in start_xmit()
3296 u64_stats_update_begin(&sq->stats.syncp); in start_xmit()
3297 u64_stats_inc(&sq->stats.kicks); in start_xmit()
3298 u64_stats_update_end(&sq->stats.syncp); in start_xmit()
3307 bool running = netif_running(vi->dev); in virtnet_rx_pause()
3310 napi_disable(&rq->napi); in virtnet_rx_pause()
3311 virtnet_cancel_dim(vi, &rq->dim); in virtnet_rx_pause()
3317 bool running = netif_running(vi->dev); in virtnet_rx_resume()
3320 schedule_delayed_work(&vi->refill, 0); in virtnet_rx_resume()
3323 virtnet_napi_enable(rq->vq, &rq->napi); in virtnet_rx_resume()
3331 qindex = rq - vi->rq; in virtnet_rx_resize()
3335 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL); in virtnet_rx_resize()
3337 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rx_resize()
3345 bool running = netif_running(vi->dev); in virtnet_tx_pause()
3349 qindex = sq - vi->sq; in virtnet_tx_pause()
3352 virtnet_napi_tx_disable(&sq->napi); in virtnet_tx_pause()
3354 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_pause()
3362 sq->reset = true; in virtnet_tx_pause()
3365 netif_stop_subqueue(vi->dev, qindex); in virtnet_tx_pause()
3372 bool running = netif_running(vi->dev); in virtnet_tx_resume()
3376 qindex = sq - vi->sq; in virtnet_tx_resume()
3378 txq = netdev_get_tx_queue(vi->dev, qindex); in virtnet_tx_resume()
3381 sq->reset = false; in virtnet_tx_resume()
3386 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); in virtnet_tx_resume()
3394 qindex = sq - vi->sq; in virtnet_tx_resize()
3398 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf, in virtnet_tx_resize()
3401 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_tx_resize()
3423 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command_reply()
3425 mutex_lock(&vi->cvq_lock); in virtnet_send_command_reply()
3426 vi->ctrl->status = ~0; in virtnet_send_command_reply()
3427 vi->ctrl->hdr.class = class; in virtnet_send_command_reply()
3428 vi->ctrl->hdr.cmd = cmd; in virtnet_send_command_reply()
3430 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); in virtnet_send_command_reply()
3437 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); in virtnet_send_command_reply()
3444 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC); in virtnet_send_command_reply()
3446 dev_warn(&vi->vdev->dev, in virtnet_send_command_reply()
3448 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3452 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command_reply()
3458 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command_reply()
3459 !virtqueue_is_broken(vi->cvq)) { in virtnet_send_command_reply()
3465 ok = vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command_reply()
3466 mutex_unlock(&vi->cvq_lock); in virtnet_send_command_reply()
3479 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
3484 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_set_mac_address()
3485 return -EOPNOTSUPP; in virtnet_set_mac_address()
3489 return -ENOMEM; in virtnet_set_mac_address()
3496 sg_init_one(&sg, addr->sa_data, dev->addr_len); in virtnet_set_mac_address()
3499 dev_warn(&vdev->dev, in virtnet_set_mac_address()
3501 ret = -EINVAL; in virtnet_set_mac_address()
3509 for (i = 0; i < dev->addr_len; i++) in virtnet_set_mac_address()
3512 i, addr->sa_data[i]); in virtnet_set_mac_address()
3530 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_stats()
3532 struct receive_queue *rq = &vi->rq[i]; in virtnet_stats()
3533 struct send_queue *sq = &vi->sq[i]; in virtnet_stats()
3536 start = u64_stats_fetch_begin(&sq->stats.syncp); in virtnet_stats()
3537 tpackets = u64_stats_read(&sq->stats.packets); in virtnet_stats()
3538 tbytes = u64_stats_read(&sq->stats.bytes); in virtnet_stats()
3539 terrors = u64_stats_read(&sq->stats.tx_timeouts); in virtnet_stats()
3540 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); in virtnet_stats()
3543 start = u64_stats_fetch_begin(&rq->stats.syncp); in virtnet_stats()
3544 rpackets = u64_stats_read(&rq->stats.packets); in virtnet_stats()
3545 rbytes = u64_stats_read(&rq->stats.bytes); in virtnet_stats()
3546 rdrops = u64_stats_read(&rq->stats.drops); in virtnet_stats()
3547 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); in virtnet_stats()
3549 tot->rx_packets += rpackets; in virtnet_stats()
3550 tot->tx_packets += tpackets; in virtnet_stats()
3551 tot->rx_bytes += rbytes; in virtnet_stats()
3552 tot->tx_bytes += tbytes; in virtnet_stats()
3553 tot->rx_dropped += rdrops; in virtnet_stats()
3554 tot->tx_errors += terrors; in virtnet_stats()
3557 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped); in virtnet_stats()
3558 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors); in virtnet_stats()
3559 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors); in virtnet_stats()
3560 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors); in virtnet_stats()
3567 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
3577 for (; i < vi->rss_indir_table_size; ++i) { in virtnet_rss_update_by_qpairs()
3579 vi->rss.indirection_table[i] = indir_val; in virtnet_rss_update_by_qpairs()
3581 vi->rss.max_tx_vq = queue_pairs; in virtnet_rss_update_by_qpairs()
3588 struct net_device *dev = vi->dev; in virtnet_set_queues()
3591 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in virtnet_set_queues()
3601 if (vi->has_rss && !netif_is_rxfh_configured(dev)) { in virtnet_set_queues()
3602 memcpy(&old_rss, &vi->rss, sizeof(old_rss)); in virtnet_set_queues()
3603 if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) { in virtnet_set_queues()
3604 vi->rss.indirection_table = old_rss.indirection_table; in virtnet_set_queues()
3605 return -ENOMEM; in virtnet_set_queues()
3612 rss_indirection_table_free(&vi->rss); in virtnet_set_queues()
3613 memcpy(&vi->rss, &old_rss, sizeof(old_rss)); in virtnet_set_queues()
3615 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n", in virtnet_set_queues()
3617 return -EINVAL; in virtnet_set_queues()
3625 return -ENOMEM; in virtnet_set_queues()
3627 mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in virtnet_set_queues()
3632 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", in virtnet_set_queues()
3634 return -EINVAL; in virtnet_set_queues()
3637 vi->curr_queue_pairs = queue_pairs; in virtnet_set_queues()
3639 if (dev->flags & IFF_UP) in virtnet_set_queues()
3640 schedule_delayed_work(&vi->refill, 0); in virtnet_set_queues()
3652 /* Make sure refill_work doesn't re-enable napi! */ in virtnet_close()
3653 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
3657 virtio_config_driver_disable(vi->vdev); in virtnet_close()
3661 cancel_work_sync(&vi->config_work); in virtnet_close()
3663 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_close()
3665 virtnet_cancel_dim(vi, &vi->rq[i].dim); in virtnet_close()
3678 struct net_device *dev = vi->dev; in virtnet_rx_mode_work()
3688 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_rx_mode_work()
3693 dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n"); in virtnet_rx_mode_work()
3699 *promisc_allmulti = !!(dev->flags & IFF_PROMISC); in virtnet_rx_mode_work()
3704 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", in virtnet_rx_mode_work()
3707 *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI); in virtnet_rx_mode_work()
3712 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", in virtnet_rx_mode_work()
3719 /* MAC filter - use one buffer for both lists */ in virtnet_rx_mode_work()
3721 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); in virtnet_rx_mode_work()
3732 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_rx_mode_work()
3735 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); in virtnet_rx_mode_work()
3738 sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); in virtnet_rx_mode_work()
3741 mac_data = (void *)&mac_data->macs[uc_count][0]; in virtnet_rx_mode_work()
3743 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_rx_mode_work()
3746 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); in virtnet_rx_mode_work()
3751 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); in virtnet_rx_mode_work()
3755 dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); in virtnet_rx_mode_work()
3766 if (vi->rx_mode_work_enabled) in virtnet_set_rx_mode()
3767 schedule_work(&vi->rx_mode_work); in virtnet_set_rx_mode()
3779 return -ENOMEM; in virtnet_vlan_rx_add_vid()
3781 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_add_vid()
3786 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); in virtnet_vlan_rx_add_vid()
3799 return -ENOMEM; in virtnet_vlan_rx_kill_vid()
3801 *_vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_kill_vid()
3806 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); in virtnet_vlan_rx_kill_vid()
3814 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
3815 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
3816 virtqueue_set_affinity(vi->rq[i].vq, NULL); in virtnet_clean_affinity()
3817 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
3820 vi->affinity_hint_set = false; in virtnet_clean_affinity()
3839 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); in virtnet_set_affinity()
3840 stragglers = num_cpu >= vi->curr_queue_pairs ? in virtnet_set_affinity()
3841 num_cpu % vi->curr_queue_pairs : in virtnet_set_affinity()
3845 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_set_affinity()
3853 virtqueue_set_affinity(vi->rq[i].vq, mask); in virtnet_set_affinity()
3854 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
3855 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS); in virtnet_set_affinity()
3859 vi->affinity_hint_set = true; in virtnet_set_affinity()
3894 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
3898 &vi->node_dead); in virtnet_cpu_notif_add()
3901 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
3907 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_remove()
3909 &vi->node_dead); in virtnet_cpu_notif_remove()
3920 return -ENOMEM; in virtnet_send_ctrl_coal_vq_cmd()
3922 coal_vq->vqn = cpu_to_le16(vqn); in virtnet_send_ctrl_coal_vq_cmd()
3923 coal_vq->coal.max_usecs = cpu_to_le32(max_usecs); in virtnet_send_ctrl_coal_vq_cmd()
3924 coal_vq->coal.max_packets = cpu_to_le32(max_packets); in virtnet_send_ctrl_coal_vq_cmd()
3930 return -EINVAL; in virtnet_send_ctrl_coal_vq_cmd()
3941 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_ctrl_coal_vq_cmd()
3942 return -EOPNOTSUPP; in virtnet_send_rx_ctrl_coal_vq_cmd()
3949 vi->rq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_rx_ctrl_coal_vq_cmd()
3950 vi->rq[queue].intr_coal.max_packets = max_packets; in virtnet_send_rx_ctrl_coal_vq_cmd()
3961 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_tx_ctrl_coal_vq_cmd()
3962 return -EOPNOTSUPP; in virtnet_send_tx_ctrl_coal_vq_cmd()
3969 vi->sq[queue].intr_coal.max_usecs = max_usecs; in virtnet_send_tx_ctrl_coal_vq_cmd()
3970 vi->sq[queue].intr_coal.max_packets = max_packets; in virtnet_send_tx_ctrl_coal_vq_cmd()
3982 ring->rx_max_pending = vi->rq[0].vq->num_max; in virtnet_get_ringparam()
3983 ring->tx_max_pending = vi->sq[0].vq->num_max; in virtnet_get_ringparam()
3984 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
3985 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
3999 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in virtnet_set_ringparam()
4000 return -EINVAL; in virtnet_set_ringparam()
4002 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_set_ringparam()
4003 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_set_ringparam()
4005 if (ring->rx_pending == rx_pending && in virtnet_set_ringparam()
4006 ring->tx_pending == tx_pending) in virtnet_set_ringparam()
4009 if (ring->rx_pending > vi->rq[0].vq->num_max) in virtnet_set_ringparam()
4010 return -EINVAL; in virtnet_set_ringparam()
4012 if (ring->tx_pending > vi->sq[0].vq->num_max) in virtnet_set_ringparam()
4013 return -EINVAL; in virtnet_set_ringparam()
4015 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_set_ringparam()
4016 rq = vi->rq + i; in virtnet_set_ringparam()
4017 sq = vi->sq + i; in virtnet_set_ringparam()
4019 if (ring->tx_pending != tx_pending) { in virtnet_set_ringparam()
4020 err = virtnet_tx_resize(vi, sq, ring->tx_pending); in virtnet_set_ringparam()
4024 /* Upon disabling and re-enabling a transmit virtqueue, the device must in virtnet_set_ringparam()
4030 vi->intr_coal_tx.max_usecs, in virtnet_set_ringparam()
4031 vi->intr_coal_tx.max_packets); in virtnet_set_ringparam()
4036 if (err && err != -EOPNOTSUPP) in virtnet_set_ringparam()
4040 if (ring->rx_pending != rx_pending) { in virtnet_set_ringparam()
4041 err = virtnet_rx_resize(vi, rq, ring->rx_pending); in virtnet_set_ringparam()
4046 mutex_lock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4048 vi->intr_coal_rx.max_usecs, in virtnet_set_ringparam()
4049 vi->intr_coal_rx.max_packets); in virtnet_set_ringparam()
4050 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_set_ringparam()
4051 if (err && err != -EOPNOTSUPP) in virtnet_set_ringparam()
4061 struct net_device *dev = vi->dev; in virtnet_commit_rss_command()
4069 sg_set_buf(&sgs[0], &vi->rss, sg_buf_size); in virtnet_commit_rss_command()
4071 if (vi->has_rss) { in virtnet_commit_rss_command()
4072 sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size; in virtnet_commit_rss_command()
4073 sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size); in virtnet_commit_rss_command()
4075 sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t)); in virtnet_commit_rss_command()
4079 - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); in virtnet_commit_rss_command()
4080 sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size); in virtnet_commit_rss_command()
4082 sg_buf_size = vi->rss_key_size; in virtnet_commit_rss_command()
4083 sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size); in virtnet_commit_rss_command()
4086 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG in virtnet_commit_rss_command()
4093 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); in virtnet_commit_rss_command()
4100 vi->rss.hash_types = vi->rss_hash_types_supported; in virtnet_init_default_rss()
4101 vi->rss_hash_types_saved = vi->rss_hash_types_supported; in virtnet_init_default_rss()
4102 vi->rss.indirection_table_mask = vi->rss_indir_table_size in virtnet_init_default_rss()
4103 ? vi->rss_indir_table_size - 1 : 0; in virtnet_init_default_rss()
4104 vi->rss.unclassified_queue = 0; in virtnet_init_default_rss()
4106 virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); in virtnet_init_default_rss()
4108 vi->rss.hash_key_length = vi->rss_key_size; in virtnet_init_default_rss()
4110 netdev_rss_key_fill(vi->rss.key, vi->rss_key_size); in virtnet_init_default_rss()
4115 info->data = 0; in virtnet_get_hashflow()
4116 switch (info->flow_type) { in virtnet_get_hashflow()
4118 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) { in virtnet_get_hashflow()
4119 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4121 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4122 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4126 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) { in virtnet_get_hashflow()
4127 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4129 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4130 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4134 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) { in virtnet_get_hashflow()
4135 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4137 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { in virtnet_get_hashflow()
4138 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4142 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) { in virtnet_get_hashflow()
4143 info->data = RXH_IP_SRC | RXH_IP_DST | in virtnet_get_hashflow()
4145 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) { in virtnet_get_hashflow()
4146 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4150 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) in virtnet_get_hashflow()
4151 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4155 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) in virtnet_get_hashflow()
4156 info->data = RXH_IP_SRC | RXH_IP_DST; in virtnet_get_hashflow()
4160 info->data = 0; in virtnet_get_hashflow()
4167 u32 new_hashtypes = vi->rss_hash_types_saved; in virtnet_set_hashflow()
4168 bool is_disable = info->data & RXH_DISCARD; in virtnet_set_hashflow()
4169 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3); in virtnet_set_hashflow()
4172 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable)) in virtnet_set_hashflow()
4175 switch (info->flow_type) { in virtnet_set_hashflow()
4216 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported)) in virtnet_set_hashflow()
4219 if (new_hashtypes != vi->rss_hash_types_saved) { in virtnet_set_hashflow()
4220 vi->rss_hash_types_saved = new_hashtypes; in virtnet_set_hashflow()
4221 vi->rss.hash_types = vi->rss_hash_types_saved; in virtnet_set_hashflow()
4222 if (vi->dev->features & NETIF_F_RXHASH) in virtnet_set_hashflow()
4233 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
4235 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); in virtnet_get_drvinfo()
4236 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); in virtnet_get_drvinfo()
4237 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); in virtnet_get_drvinfo()
4246 u16 queue_pairs = channels->combined_count; in virtnet_set_channels()
4252 if (channels->rx_count || channels->tx_count || channels->other_count) in virtnet_set_channels()
4253 return -EINVAL; in virtnet_set_channels()
4255 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
4256 return -EINVAL; in virtnet_set_channels()
4262 if (vi->rq[0].xdp_prog) in virtnet_set_channels()
4263 return -EINVAL; in virtnet_set_channels()
4281 int num, int qid, const struct virtnet_stat_desc *desc) in virtnet_stats_sprintf() argument
4286 for (i = 0; i < num; ++i) in virtnet_stats_sprintf()
4289 for (i = 0; i < num; ++i) in virtnet_stats_sprintf()
4294 /* qid == -1: for rx/tx queue total field */
4300 u32 num; in virtnet_get_stats_string() local
4305 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_get_stats_string()
4307 num = ARRAY_SIZE(virtnet_stats_cvq_desc); in virtnet_get_stats_string()
4309 virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc); in virtnet_get_stats_string()
4318 num = ARRAY_SIZE(virtnet_rq_stats_desc); in virtnet_get_stats_string()
4320 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4325 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_stats_string()
4327 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc); in virtnet_get_stats_string()
4329 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4332 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_stats_string()
4334 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc); in virtnet_get_stats_string()
4336 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4339 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_get_stats_string()
4341 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc); in virtnet_get_stats_string()
4343 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4352 num = ARRAY_SIZE(virtnet_sq_stats_desc); in virtnet_get_stats_string()
4354 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4359 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_stats_string()
4361 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc); in virtnet_get_stats_string()
4363 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4366 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_stats_string()
4368 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc); in virtnet_get_stats_string()
4370 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
4373 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_get_stats_string()
4375 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc); in virtnet_get_stats_string()
4377 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); in virtnet_get_stats_string()
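
A minimal sketch of the string composition used above: with qid == -1 the queue-less format produces the per-type totals, otherwise the queue index is folded into the name. The format strings and field names below are illustrative, not the driver's descriptor tables.

#include <stdio.h>

static void print_stat_names(const char *fmt, const char *noq_fmt,
                             const char *const *names, int num, int qid)
{
    char buf[64];

    for (int i = 0; i < num; i++) {
        if (qid < 0)                                    /* qid == -1: totals */
            snprintf(buf, sizeof(buf), noq_fmt, names[i]);
        else
            snprintf(buf, sizeof(buf), fmt, (unsigned int)qid, names[i]);
        puts(buf);
    }
}

int main(void)
{
    static const char *const rx_names[] = { "packets", "bytes", "drops" };

    print_stat_names("rx%u_%s", "rx_%s", rx_names, 3, -1); /* rx_packets ... */
    print_stat_names("rx%u_%s", "rx_%s", rx_names, 3, 0);  /* rx0_packets ... */
    return 0;
}
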
4385 /* The stats are written to qstats or ethtool -S */

4407 ctx->data = data; in virtnet_stats_ctx_init()
4408 ctx->to_qstat = to_qstat; in virtnet_stats_ctx_init()
4411 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); in virtnet_stats_ctx_init()
4412 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); in virtnet_stats_ctx_init()
4416 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4417 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; in virtnet_stats_ctx_init()
4418 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); in virtnet_stats_ctx_init()
4419 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); in virtnet_stats_ctx_init()
4422 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4423 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; in virtnet_stats_ctx_init()
4424 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); in virtnet_stats_ctx_init()
4425 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); in virtnet_stats_ctx_init()
4428 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_stats_ctx_init()
4429 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO; in virtnet_stats_ctx_init()
4430 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); in virtnet_stats_ctx_init()
4431 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso); in virtnet_stats_ctx_init()
4434 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4435 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; in virtnet_stats_ctx_init()
4436 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); in virtnet_stats_ctx_init()
4437 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); in virtnet_stats_ctx_init()
4442 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4443 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; in virtnet_stats_ctx_init()
4444 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); in virtnet_stats_ctx_init()
4445 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); in virtnet_stats_ctx_init()
4448 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_stats_ctx_init()
4449 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM; in virtnet_stats_ctx_init()
4450 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); in virtnet_stats_ctx_init()
4451 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum); in virtnet_stats_ctx_init()
4454 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4455 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; in virtnet_stats_ctx_init()
4456 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); in virtnet_stats_ctx_init()
4457 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); in virtnet_stats_ctx_init()
4460 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4461 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; in virtnet_stats_ctx_init()
4462 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); in virtnet_stats_ctx_init()
4463 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); in virtnet_stats_ctx_init()
4469 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc); in virtnet_stats_ctx_init()
4470 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc); in virtnet_stats_ctx_init()
4472 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { in virtnet_stats_ctx_init()
4475 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ; in virtnet_stats_ctx_init()
4476 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc); in virtnet_stats_ctx_init()
4477 ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq); in virtnet_stats_ctx_init()
4482 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_stats_ctx_init()
4483 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; in virtnet_stats_ctx_init()
4484 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc); in virtnet_stats_ctx_init()
4485 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); in virtnet_stats_ctx_init()
4488 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_stats_ctx_init()
4489 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; in virtnet_stats_ctx_init()
4490 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc); in virtnet_stats_ctx_init()
4491 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); in virtnet_stats_ctx_init()
4494 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { in virtnet_stats_ctx_init()
4495 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; in virtnet_stats_ctx_init()
4496 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc); in virtnet_stats_ctx_init()
4497 ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); in virtnet_stats_ctx_init()
4502 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_stats_ctx_init()
4503 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; in virtnet_stats_ctx_init()
4504 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc); in virtnet_stats_ctx_init()
4505 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); in virtnet_stats_ctx_init()
4508 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_stats_ctx_init()
4509 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; in virtnet_stats_ctx_init()
4510 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc); in virtnet_stats_ctx_init()
4511 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); in virtnet_stats_ctx_init()
4514 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { in virtnet_stats_ctx_init()
4515 ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; in virtnet_stats_ctx_init()
4516 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc); in virtnet_stats_ctx_init()
4517 ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); in virtnet_stats_ctx_init()
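
A compact sketch of the accumulation pattern in virtnet_stats_ctx_init(): each capability bit the device advertises contributes its descriptor count and its struct size to the per-queue-type totals, and is remembered in the bitmap used later to request those stats. The capability table here is invented for illustration.

#include <stdint.h>
#include <stdio.h>

struct cap_desc {
    uint64_t bit;       /* capability bit the device may advertise */
    unsigned int ndesc; /* stat fields this group contributes */
    unsigned int size;  /* bytes it occupies in the device reply */
};

int main(void)
{
    /* invented RX groups: "basic", "csum", "speed" */
    static const struct cap_desc rx_caps[] = {
        { 1u << 0, 4, 32 }, { 1u << 1, 3, 24 }, { 1u << 3, 2, 16 },
    };
    uint64_t device_cap = (1u << 0) | (1u << 3);   /* what the device offers */
    uint64_t bitmap = 0;
    unsigned int desc_num = 0, size = 0;

    for (unsigned int i = 0; i < 3; i++) {
        if (!(device_cap & rx_caps[i].bit))
            continue;
        bitmap |= rx_caps[i].bit;       /* remember what we will request */
        desc_num += rx_caps[i].ndesc;   /* how many fields to expect */
        size += rx_caps[i].size;        /* how large the reply buffer must be */
    }
    printf("rx: bitmap=%#llx desc_num=%u size=%u\n",
           (unsigned long long)bitmap, desc_num, size);
    return 0;
}
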
4521 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
4523 * @num: field num
4527 static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num) in stats_sum_queue() argument
4529 u32 step = num; in stats_sum_queue()
4533 for (i = 0; i < num; ++i) { in stats_sum_queue()
4548 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; in virtnet_fill_total_fields()
4549 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; in virtnet_fill_total_fields()
4550 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; in virtnet_fill_total_fields()
4552 first_rx_q = ctx->data + num_rx + num_tx + num_cq; in virtnet_fill_total_fields()
4553 first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx; in virtnet_fill_total_fields()
4555 data = ctx->data; in virtnet_fill_total_fields()
4557 stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
4559 data = ctx->data + num_rx; in virtnet_fill_total_fields()
4561 stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs); in virtnet_fill_total_fields()
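
stats_sum_queue() is a strided sum: every queue contributes num consecutive u64 fields, and field i of the total is the sum of field i across all queues. A standalone sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

static void stats_sum(uint64_t *sum, unsigned int num,
                      const uint64_t *q_value, unsigned int q_num)
{
    for (unsigned int i = 0; i < num; i++) {
        uint64_t s = 0;

        for (unsigned int j = 0; j < q_num; j++)
            s += q_value[j * num + i];   /* stride == num fields per queue */
        sum[i] = s;
    }
}

int main(void)
{
    /* two queues, three fields each: packets, bytes, drops */
    const uint64_t per_queue[2 * 3] = { 10, 1500, 0,   5, 700, 1 };
    uint64_t total[3];

    stats_sum(total, 3, per_queue, 2);
    printf("packets=%llu bytes=%llu drops=%llu\n",
           (unsigned long long)total[0], (unsigned long long)total[1],
           (unsigned long long)total[2]);
    return 0;
}
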
4573 int i, num; in virtnet_fill_stats_qstat() local
4576 bitmap = ctx->bitmap[queue_type]; in virtnet_fill_stats_qstat()
4581 num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); in virtnet_fill_stats_qstat()
4584 num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); in virtnet_fill_stats_qstat()
4587 for (i = 0; i < num; ++i) { in virtnet_fill_stats_qstat()
4588 offset = desc[i].qstat_offset / sizeof(*ctx->data); in virtnet_fill_stats_qstat()
4590 ctx->data[offset] = u64_stats_read(v_stat); in virtnet_fill_stats_qstat()
4597 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); in virtnet_fill_stats_qstat()
4604 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); in virtnet_fill_stats_qstat()
4611 num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); in virtnet_fill_stats_qstat()
4618 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); in virtnet_fill_stats_qstat()
4625 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); in virtnet_fill_stats_qstat()
4632 num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); in virtnet_fill_stats_qstat()
4639 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); in virtnet_fill_stats_qstat()
4646 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); in virtnet_fill_stats_qstat()
4654 for (i = 0; i < num; ++i) { in virtnet_fill_stats_qstat()
4655 offset = desc[i].qstat_offset / sizeof(*ctx->data); in virtnet_fill_stats_qstat()
4657 ctx->data[offset] = le64_to_cpu(*v); in virtnet_fill_stats_qstat()
4661 /* virtnet_fill_stats - copy the stats to qstats or ethtool -S
4680 int i, num; in virtnet_fill_stats() local
4682 if (ctx->to_qstat) in virtnet_fill_stats()
4685 num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; in virtnet_fill_stats()
4686 num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; in virtnet_fill_stats()
4687 num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; in virtnet_fill_stats()
4690 bitmap = ctx->bitmap[queue_type]; in virtnet_fill_stats()
4696 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2); in virtnet_fill_stats()
4698 num = ARRAY_SIZE(virtnet_sq_stats_desc); in virtnet_fill_stats()
4704 offset += num; in virtnet_fill_stats()
4709 num = ARRAY_SIZE(virtnet_rq_stats_desc); in virtnet_fill_stats()
4715 offset += num; in virtnet_fill_stats()
4720 num = ARRAY_SIZE(virtnet_stats_cvq_desc); in virtnet_fill_stats()
4724 offset += num; in virtnet_fill_stats()
4729 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc); in virtnet_fill_stats()
4733 offset += num; in virtnet_fill_stats()
4738 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc); in virtnet_fill_stats()
4742 offset += num; in virtnet_fill_stats()
4747 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc); in virtnet_fill_stats()
4751 offset += num; in virtnet_fill_stats()
4756 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc); in virtnet_fill_stats()
4760 offset += num; in virtnet_fill_stats()
4765 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc); in virtnet_fill_stats()
4769 offset += num; in virtnet_fill_stats()
4774 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc); in virtnet_fill_stats()
4778 offset += num; in virtnet_fill_stats()
4784 for (i = 0; i < num; ++i) { in virtnet_fill_stats()
4786 ctx->data[offset + i] = le64_to_cpu(*v); in virtnet_fill_stats()
4792 for (i = 0; i < num; ++i) { in virtnet_fill_stats()
4794 ctx->data[offset + i] = u64_stats_read(v_stat); in virtnet_fill_stats()
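
The offsets above imply a flat ethtool -S layout: rx totals, tx totals, cvq stats, then one block per rx queue followed by one block per tx queue. A sketch of that index arithmetic, assuming even vq indices are rx queues and odd ones are tx queues:

#include <stdio.h>

/* Where queue qid's block starts in the flat u64 array (queue pair = qid/2). */
static unsigned int stats_block_offset(unsigned int qid, unsigned int curr_qp,
                                       unsigned int num_rx, unsigned int num_tx,
                                       unsigned int num_cq)
{
    unsigned int off = num_rx + num_tx + num_cq;    /* skip totals and cvq */

    if (qid & 1)                                    /* tx queue */
        return off + num_rx * curr_qp + num_tx * (qid / 2);
    return off + num_rx * (qid / 2);                /* rx queue */
}

int main(void)
{
    /* e.g. 2 queue pairs, 5 rx fields, 4 tx fields, no cvq stats */
    printf("rx0 at %u\n", stats_block_offset(0, 2, 5, 4, 0)); /* 9  */
    printf("rx1 at %u\n", stats_block_offset(2, 2, 5, 4, 0)); /* 14 */
    printf("tx0 at %u\n", stats_block_offset(1, 2, 5, 4, 0)); /* 19 */
    printf("tx1 at %u\n", stats_block_offset(3, 2, 5, 4, 0)); /* 23 */
    return 0;
}
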
4819 for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) { in __virtnet_get_hw_stats()
4821 qid = le16_to_cpu(hdr->vq_index); in __virtnet_get_hw_stats()
4822 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type); in __virtnet_get_hw_stats()
4834 u64 bitmap = ctx->bitmap[qtype]; in virtnet_make_stat_req()
4839 req->stats[*idx].vq_index = cpu_to_le16(qid); in virtnet_make_stat_req()
4840 req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap); in virtnet_make_stat_req()
4844 /* qid: -1: get stats of all vqs.
4856 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) in virtnet_get_hw_stats()
4859 if (qid == -1) { in virtnet_get_hw_stats()
4860 last_vq = vi->curr_queue_pairs * 2 - 1; in virtnet_get_hw_stats()
4873 if (ctx->bitmap[qtype]) { in virtnet_get_hw_stats()
4875 res_size += ctx->size[qtype]; in virtnet_get_hw_stats()
4879 if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) { in virtnet_get_hw_stats()
4880 res_size += ctx->size[VIRTNET_Q_TYPE_CQ]; in virtnet_get_hw_stats()
4886 return -ENOMEM; in virtnet_get_hw_stats()
4891 return -ENOMEM; in virtnet_get_hw_stats()
4899 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j); in virtnet_get_hw_stats()
4918 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p); in virtnet_get_strings()
4919 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p); in virtnet_get_strings()
4923 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
4926 for (i = 0; i < vi->curr_queue_pairs; ++i) in virtnet_get_strings()
4945 vi->curr_queue_pairs * pair_count; in virtnet_get_sset_count()
4947 return -EOPNOTSUPP; in virtnet_get_sset_count()
4960 if (virtnet_get_hw_stats(vi, &ctx, -1)) in virtnet_get_ethtool_stats()
4961 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n"); in virtnet_get_ethtool_stats()
4963 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
4964 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_ethtool_stats()
4965 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats()
4967 stats_base = (const u8 *)&rq->stats; in virtnet_get_ethtool_stats()
4969 start = u64_stats_fetch_begin(&rq->stats.syncp); in virtnet_get_ethtool_stats()
4971 } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); in virtnet_get_ethtool_stats()
4973 stats_base = (const u8 *)&sq->stats; in virtnet_get_ethtool_stats()
4975 start = u64_stats_fetch_begin(&sq->stats.syncp); in virtnet_get_ethtool_stats()
4977 } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); in virtnet_get_ethtool_stats()
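
The fetch_begin/fetch_retry loop re-reads the counters if a writer updated them mid-copy. A simplified, single-threaded userspace analogue of that retry pattern (not the kernel's u64_stats API, and without the memory barriers a real implementation needs):

#include <stdint.h>
#include <stdio.h>

struct counters {
    unsigned int seq;      /* bumped to odd while a writer is updating */
    uint64_t packets;
    uint64_t bytes;
};

static void read_counters(const struct counters *c, uint64_t *pkts, uint64_t *bytes)
{
    unsigned int start;

    do {
        start = c->seq;              /* snapshot the sequence before copying */
        *pkts = c->packets;
        *bytes = c->bytes;
    } while (c->seq != start || (start & 1));   /* retry if a write raced */
}

int main(void)
{
    struct counters c = { .seq = 2, .packets = 42, .bytes = 6400 };
    uint64_t p, b;

    read_counters(&c, &p, &b);
    printf("packets=%llu bytes=%llu\n",
           (unsigned long long)p, (unsigned long long)b);
    return 0;
}
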
4988 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
4989 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
4990 channels->max_other = 0; in virtnet_get_channels()
4991 channels->rx_count = 0; in virtnet_get_channels()
4992 channels->tx_count = 0; in virtnet_get_channels()
4993 channels->other_count = 0; in virtnet_get_channels()
5002 &vi->speed, &vi->duplex); in virtnet_set_link_ksettings()
5010 cmd->base.speed = vi->speed; in virtnet_get_link_ksettings()
5011 cmd->base.duplex = vi->duplex; in virtnet_get_link_ksettings()
5012 cmd->base.port = PORT_OTHER; in virtnet_get_link_ksettings()
5026 return -ENOMEM; in virtnet_send_tx_notf_coal_cmds()
5028 coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); in virtnet_send_tx_notf_coal_cmds()
5029 coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); in virtnet_send_tx_notf_coal_cmds()
5035 return -EINVAL; in virtnet_send_tx_notf_coal_cmds()
5037 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5038 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5039 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_tx_notf_coal_cmds()
5040 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; in virtnet_send_tx_notf_coal_cmds()
5041 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; in virtnet_send_tx_notf_coal_cmds()
5051 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; in virtnet_send_rx_notf_coal_cmds()
5055 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_send_rx_notf_coal_cmds()
5056 return -EOPNOTSUPP; in virtnet_send_rx_notf_coal_cmds()
5058 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || in virtnet_send_rx_notf_coal_cmds()
5059 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) in virtnet_send_rx_notf_coal_cmds()
5060 return -EINVAL; in virtnet_send_rx_notf_coal_cmds()
5062 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5063 vi->rx_dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5064 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5065 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5066 vi->rq[i].dim_enabled = true; in virtnet_send_rx_notf_coal_cmds()
5067 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5074 return -ENOMEM; in virtnet_send_rx_notf_coal_cmds()
5076 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { in virtnet_send_rx_notf_coal_cmds()
5077 vi->rx_dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5078 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5079 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5080 vi->rq[i].dim_enabled = false; in virtnet_send_rx_notf_coal_cmds()
5081 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5085 /* Since the per-queue coalescing params can be set, in virtnet_send_rx_notf_coal_cmds()
5089 coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); in virtnet_send_rx_notf_coal_cmds()
5090 coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_cmds()
5096 return -EINVAL; in virtnet_send_rx_notf_coal_cmds()
5098 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5099 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5100 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_send_rx_notf_coal_cmds()
5101 mutex_lock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
5102 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; in virtnet_send_rx_notf_coal_cmds()
5103 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; in virtnet_send_rx_notf_coal_cmds()
5104 mutex_unlock(&vi->rq[i].dim_lock); in virtnet_send_rx_notf_coal_cmds()
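
The coalescing parameters travel to the device as little-endian 32-bit fields, hence the cpu_to_le32() conversions above. A sketch of that wire encoding with stand-in structures (two fields: usecs, then max packets):

#include <stdint.h>
#include <stdio.h>

struct coal_rx_wire {
    uint8_t rx_usecs[4];        /* little-endian u32 */
    uint8_t rx_max_packets[4];  /* little-endian u32 */
};

static void put_le32(uint8_t *p, uint32_t v)
{
    p[0] = v & 0xff;
    p[1] = (v >> 8) & 0xff;
    p[2] = (v >> 16) & 0xff;
    p[3] = (v >> 24) & 0xff;
}

int main(void)
{
    struct coal_rx_wire cmd;

    put_le32(cmd.rx_usecs, 64);         /* ethtool -C ... rx-usecs 64 */
    put_le32(cmd.rx_max_packets, 32);   /* ethtool -C ... rx-frames 32 */
    for (unsigned int i = 0; i < sizeof(cmd); i++)
        printf("%02x ", ((uint8_t *)&cmd)[i]);
    printf("\n");
    return 0;
}
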
5130 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; in virtnet_send_rx_notf_coal_vq_cmds()
5135 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5136 cur_rx_dim = vi->rq[queue].dim_enabled; in virtnet_send_rx_notf_coal_vq_cmds()
5137 max_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_send_rx_notf_coal_vq_cmds()
5138 max_packets = vi->rq[queue].intr_coal.max_packets; in virtnet_send_rx_notf_coal_vq_cmds()
5140 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || in virtnet_send_rx_notf_coal_vq_cmds()
5141 ec->rx_max_coalesced_frames != max_packets)) { in virtnet_send_rx_notf_coal_vq_cmds()
5142 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5143 return -EINVAL; in virtnet_send_rx_notf_coal_vq_cmds()
5147 vi->rq[queue].dim_enabled = true; in virtnet_send_rx_notf_coal_vq_cmds()
5148 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5153 vi->rq[queue].dim_enabled = false; in virtnet_send_rx_notf_coal_vq_cmds()
5159 ec->rx_coalesce_usecs, in virtnet_send_rx_notf_coal_vq_cmds()
5160 ec->rx_max_coalesced_frames); in virtnet_send_rx_notf_coal_vq_cmds()
5161 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_send_rx_notf_coal_vq_cmds()
5176 ec->tx_coalesce_usecs, in virtnet_send_notf_coal_vq_cmds()
5177 ec->tx_max_coalesced_frames); in virtnet_send_notf_coal_vq_cmds()
5189 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_rx_dim_work()
5190 struct net_device *dev = vi->dev; in virtnet_rx_dim_work()
5194 qnum = rq - vi->rq; in virtnet_rx_dim_work()
5196 mutex_lock(&rq->dim_lock); in virtnet_rx_dim_work()
5197 if (!rq->dim_enabled) in virtnet_rx_dim_work()
5201 if (update_moder.usec != rq->intr_coal.max_usecs || in virtnet_rx_dim_work()
5202 update_moder.pkts != rq->intr_coal.max_packets) { in virtnet_rx_dim_work()
5208 dev->name, qnum); in virtnet_rx_dim_work()
5211 dim->state = DIM_START_MEASURE; in virtnet_rx_dim_work()
5212 mutex_unlock(&rq->dim_lock); in virtnet_rx_dim_work()
5220 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) in virtnet_coal_params_supported()
5221 return -EOPNOTSUPP; in virtnet_coal_params_supported()
5223 if (ec->tx_max_coalesced_frames > 1 || in virtnet_coal_params_supported()
5224 ec->rx_max_coalesced_frames != 1) in virtnet_coal_params_supported()
5225 return -EINVAL; in virtnet_coal_params_supported()
5235 return -EBUSY; in virtnet_should_update_vq_weight()
5252 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; in virtnet_set_coalesce()
5253 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { in virtnet_set_coalesce()
5254 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, in virtnet_set_coalesce()
5255 vi->sq[queue_number].napi.weight, in virtnet_set_coalesce()
5261 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be in virtnet_set_coalesce()
5268 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) in virtnet_set_coalesce()
5280 for (i = queue_number; i < vi->max_queue_pairs; i++) { in virtnet_set_coalesce()
5281 if (vi->sq[i].xsk_pool) in virtnet_set_coalesce()
5282 return -EBUSY; in virtnet_set_coalesce()
5285 for (; queue_number < vi->max_queue_pairs; queue_number++) in virtnet_set_coalesce()
5286 vi->sq[queue_number].napi.weight = napi_weight; in virtnet_set_coalesce()
5299 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_get_coalesce()
5300 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; in virtnet_get_coalesce()
5301 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; in virtnet_get_coalesce()
5302 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; in virtnet_get_coalesce()
5303 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; in virtnet_get_coalesce()
5304 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; in virtnet_get_coalesce()
5306 ec->rx_max_coalesced_frames = 1; in virtnet_get_coalesce()
5308 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
5309 ec->tx_max_coalesced_frames = 1; in virtnet_get_coalesce()
5323 if (queue >= vi->max_queue_pairs) in virtnet_set_per_queue_coalesce()
5324 return -EINVAL; in virtnet_set_per_queue_coalesce()
5327 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; in virtnet_set_per_queue_coalesce()
5328 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, in virtnet_set_per_queue_coalesce()
5329 vi->sq[queue].napi.weight, in virtnet_set_per_queue_coalesce()
5334 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_set_per_queue_coalesce()
5343 vi->sq[queue].napi.weight = napi_weight; in virtnet_set_per_queue_coalesce()
5354 if (queue >= vi->max_queue_pairs) in virtnet_get_per_queue_coalesce()
5355 return -EINVAL; in virtnet_get_per_queue_coalesce()
5357 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_get_per_queue_coalesce()
5358 mutex_lock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5359 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5360 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; in virtnet_get_per_queue_coalesce()
5361 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5362 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; in virtnet_get_per_queue_coalesce()
5363 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; in virtnet_get_per_queue_coalesce()
5364 mutex_unlock(&vi->rq[queue].dim_lock); in virtnet_get_per_queue_coalesce()
5366 ec->rx_max_coalesced_frames = 1; in virtnet_get_per_queue_coalesce()
5368 if (vi->sq[queue].napi.weight) in virtnet_get_per_queue_coalesce()
5369 ec->tx_max_coalesced_frames = 1; in virtnet_get_per_queue_coalesce()
5379 vi->speed = SPEED_UNKNOWN; in virtnet_init_settings()
5380 vi->duplex = DUPLEX_UNKNOWN; in virtnet_init_settings()
5385 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; in virtnet_get_rxfh_key_size()
5390 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size; in virtnet_get_rxfh_indir_size()
5399 if (rxfh->indir) { in virtnet_get_rxfh()
5400 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_get_rxfh()
5401 rxfh->indir[i] = vi->rss.indirection_table[i]; in virtnet_get_rxfh()
5404 if (rxfh->key) in virtnet_get_rxfh()
5405 memcpy(rxfh->key, vi->rss.key, vi->rss_key_size); in virtnet_get_rxfh()
5407 rxfh->hfunc = ETH_RSS_HASH_TOP; in virtnet_get_rxfh()
5420 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && in virtnet_set_rxfh()
5421 rxfh->hfunc != ETH_RSS_HASH_TOP) in virtnet_set_rxfh()
5422 return -EOPNOTSUPP; in virtnet_set_rxfh()
5424 if (rxfh->indir) { in virtnet_set_rxfh()
5425 if (!vi->has_rss) in virtnet_set_rxfh()
5426 return -EOPNOTSUPP; in virtnet_set_rxfh()
5428 for (i = 0; i < vi->rss_indir_table_size; ++i) in virtnet_set_rxfh()
5429 vi->rss.indirection_table[i] = rxfh->indir[i]; in virtnet_set_rxfh()
5433 if (rxfh->key) { in virtnet_set_rxfh()
5438 if (!vi->has_rss && !vi->has_rss_hash_report) in virtnet_set_rxfh()
5439 return -EOPNOTSUPP; in virtnet_set_rxfh()
5441 memcpy(vi->rss.key, rxfh->key, vi->rss_key_size); in virtnet_set_rxfh()
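
What the indirection table written here expresses, roughly: the device reduces the packet hash modulo the table size, and the table entry names the receive queue. The round-robin fill and the hash value below are illustrative only; the device's real hash is computed with the configured key.

#include <stdint.h>
#include <stdio.h>

#define INDIR_SIZE 8

int main(void)
{
    uint16_t indir[INDIR_SIZE];
    unsigned int curr_queue_pairs = 3;
    uint32_t hash = 0xdeadbeef;   /* stand-in for the device's RSS hash */

    for (unsigned int i = 0; i < INDIR_SIZE; i++)
        indir[i] = i % curr_queue_pairs;    /* default: round-robin spread */

    printf("packet with hash %#x -> rx queue %u\n",
           hash, (unsigned int)indir[hash % INDIR_SIZE]);
    return 0;
}
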
5456 switch (info->cmd) { in virtnet_get_rxnfc()
5458 info->data = vi->curr_queue_pairs; in virtnet_get_rxnfc()
5464 rc = -EOPNOTSUPP; in virtnet_get_rxnfc()
5475 switch (info->cmd) { in virtnet_set_rxnfc()
5478 rc = -EINVAL; in virtnet_set_rxnfc()
5482 rc = -EOPNOTSUPP; in virtnet_set_rxnfc()
5519 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_queue_stats_rx()
5525 virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0); in virtnet_get_queue_stats_rx()
5532 struct send_queue *sq = &vi->sq[i]; in virtnet_get_queue_stats_tx()
5538 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0); in virtnet_get_queue_stats_tx()
5547 /* The queue stats of the virtio-net will not be reset. So here we in virtnet_get_base_stats()
5550 rx->bytes = 0; in virtnet_get_base_stats()
5551 rx->packets = 0; in virtnet_get_base_stats()
5553 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { in virtnet_get_base_stats()
5554 rx->hw_drops = 0; in virtnet_get_base_stats()
5555 rx->hw_drop_overruns = 0; in virtnet_get_base_stats()
5558 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { in virtnet_get_base_stats()
5559 rx->csum_unnecessary = 0; in virtnet_get_base_stats()
5560 rx->csum_none = 0; in virtnet_get_base_stats()
5561 rx->csum_bad = 0; in virtnet_get_base_stats()
5564 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { in virtnet_get_base_stats()
5565 rx->hw_gro_packets = 0; in virtnet_get_base_stats()
5566 rx->hw_gro_bytes = 0; in virtnet_get_base_stats()
5567 rx->hw_gro_wire_packets = 0; in virtnet_get_base_stats()
5568 rx->hw_gro_wire_bytes = 0; in virtnet_get_base_stats()
5571 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) in virtnet_get_base_stats()
5572 rx->hw_drop_ratelimits = 0; in virtnet_get_base_stats()
5574 tx->bytes = 0; in virtnet_get_base_stats()
5575 tx->packets = 0; in virtnet_get_base_stats()
5576 tx->stop = 0; in virtnet_get_base_stats()
5577 tx->wake = 0; in virtnet_get_base_stats()
5579 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { in virtnet_get_base_stats()
5580 tx->hw_drops = 0; in virtnet_get_base_stats()
5581 tx->hw_drop_errors = 0; in virtnet_get_base_stats()
5584 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { in virtnet_get_base_stats()
5585 tx->csum_none = 0; in virtnet_get_base_stats()
5586 tx->needs_csum = 0; in virtnet_get_base_stats()
5589 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { in virtnet_get_base_stats()
5590 tx->hw_gso_packets = 0; in virtnet_get_base_stats()
5591 tx->hw_gso_bytes = 0; in virtnet_get_base_stats()
5592 tx->hw_gso_wire_packets = 0; in virtnet_get_base_stats()
5593 tx->hw_gso_wire_bytes = 0; in virtnet_get_base_stats()
5596 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) in virtnet_get_base_stats()
5597 tx->hw_drop_ratelimits = 0; in virtnet_get_base_stats()
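
The zeroed base works because the per-queue counters are never reset; the qstats core is expected to report base plus the sum over the active queues, so the history lives entirely in the queues. A toy illustration of that addition (the combining itself is done by core code, not by the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t base_rx_packets = 0;                  /* driver reports 0 */
    uint64_t per_queue_rx_packets[2] = { 100, 250 };
    uint64_t total = base_rx_packets;

    for (int i = 0; i < 2; i++)
        total += per_queue_rx_packets[i];          /* core sums the queues */
    printf("rx packets reported for the device: %llu\n",
           (unsigned long long)total);
    return 0;
}
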
5608 struct virtnet_info *vi = vdev->priv; in virtnet_freeze_down()
5611 flush_work(&vi->config_work); in virtnet_freeze_down()
5613 flush_work(&vi->rx_mode_work); in virtnet_freeze_down()
5615 netif_tx_lock_bh(vi->dev); in virtnet_freeze_down()
5616 netif_device_detach(vi->dev); in virtnet_freeze_down()
5617 netif_tx_unlock_bh(vi->dev); in virtnet_freeze_down()
5618 if (netif_running(vi->dev)) in virtnet_freeze_down()
5619 virtnet_close(vi->dev); in virtnet_freeze_down()
5626 struct virtnet_info *vi = vdev->priv; in virtnet_restore_up()
5638 if (netif_running(vi->dev)) { in virtnet_restore_up()
5639 err = virtnet_open(vi->dev); in virtnet_restore_up()
5644 netif_tx_lock_bh(vi->dev); in virtnet_restore_up()
5645 netif_device_attach(vi->dev); in virtnet_restore_up()
5646 netif_tx_unlock_bh(vi->dev); in virtnet_restore_up()
5657 return -ENOMEM; in virtnet_set_guest_offloads()
5659 *_offloads = cpu_to_virtio64(vi->vdev, offloads); in virtnet_set_guest_offloads()
5665 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); in virtnet_set_guest_offloads()
5666 return -EINVAL; in virtnet_set_guest_offloads()
5676 if (!vi->guest_offloads) in virtnet_clear_guest_offloads()
5684 u64 offloads = vi->guest_offloads; in virtnet_restore_guest_offloads()
5686 if (!vi->guest_offloads) in virtnet_restore_guest_offloads()
5697 qindex = rq - vi->rq; in virtnet_rq_bind_xsk_pool()
5700 err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id); in virtnet_rq_bind_xsk_pool()
5704 err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info, in virtnet_rq_bind_xsk_pool()
5709 xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info); in virtnet_rq_bind_xsk_pool()
5714 err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL); in virtnet_rq_bind_xsk_pool()
5716 netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); in virtnet_rq_bind_xsk_pool()
5721 rq->xsk_pool = pool; in virtnet_rq_bind_xsk_pool()
5729 xdp_rxq_info_unreg(&rq->xsk_rxq_info); in virtnet_rq_bind_xsk_pool()
5739 qindex = sq - vi->sq; in virtnet_sq_bind_xsk_pool()
5743 err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf, in virtnet_sq_bind_xsk_pool()
5746 netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err); in virtnet_sq_bind_xsk_pool()
5750 sq->xsk_pool = pool; in virtnet_sq_bind_xsk_pool()
5768 if (vi->hdr_len > xsk_pool_get_headroom(pool)) in virtnet_xsk_pool_enable()
5769 return -EINVAL; in virtnet_xsk_pool_enable()
5774 if (vi->big_packets && !vi->mergeable_rx_bufs) in virtnet_xsk_pool_enable()
5775 return -ENOENT; in virtnet_xsk_pool_enable()
5777 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_enable()
5778 return -EINVAL; in virtnet_xsk_pool_enable()
5780 sq = &vi->sq[qid]; in virtnet_xsk_pool_enable()
5781 rq = &vi->rq[qid]; in virtnet_xsk_pool_enable()
5783 /* xsk assumes that tx and rx must have the same dma device. The af-xdp in virtnet_xsk_pool_enable()
5787 * But vq->dma_dev allows every vq to have its own dma dev. So I in virtnet_xsk_pool_enable()
5790 if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq)) in virtnet_xsk_pool_enable()
5791 return -EINVAL; in virtnet_xsk_pool_enable()
5793 dma_dev = virtqueue_dma_dev(rq->vq); in virtnet_xsk_pool_enable()
5795 return -EINVAL; in virtnet_xsk_pool_enable()
5797 size = virtqueue_get_vring_size(rq->vq); in virtnet_xsk_pool_enable()
5799 rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL); in virtnet_xsk_pool_enable()
5800 if (!rq->xsk_buffs) in virtnet_xsk_pool_enable()
5801 return -ENOMEM; in virtnet_xsk_pool_enable()
5803 hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len, in virtnet_xsk_pool_enable()
5805 if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) in virtnet_xsk_pool_enable()
5806 return -ENOMEM; in virtnet_xsk_pool_enable()
5823 sq->xsk_hdr_dma_addr = hdr_dma; in virtnet_xsk_pool_enable()
5832 virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len, in virtnet_xsk_pool_enable()
5845 if (qid >= vi->curr_queue_pairs) in virtnet_xsk_pool_disable()
5846 return -EINVAL; in virtnet_xsk_pool_disable()
5848 sq = &vi->sq[qid]; in virtnet_xsk_pool_disable()
5849 rq = &vi->rq[qid]; in virtnet_xsk_pool_disable()
5851 pool = rq->xsk_pool; in virtnet_xsk_pool_disable()
5858 virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr, in virtnet_xsk_pool_disable()
5859 vi->hdr_len, DMA_TO_DEVICE, 0); in virtnet_xsk_pool_disable()
5860 kvfree(rq->xsk_buffs); in virtnet_xsk_pool_disable()
5867 if (xdp->xsk.pool) in virtnet_xsk_pool_setup()
5868 return virtnet_xsk_pool_enable(dev, xdp->xsk.pool, in virtnet_xsk_pool_setup()
5869 xdp->xsk.queue_id); in virtnet_xsk_pool_setup()
5871 return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id); in virtnet_xsk_pool_setup()
5879 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; in virtnet_xdp_set()
5885 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) in virtnet_xdp_set()
5886 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_xdp_set()
5887 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_xdp_set()
5888 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_xdp_set()
5889 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_xdp_set()
5890 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) || in virtnet_xdp_set()
5891 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) || in virtnet_xdp_set()
5892 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) { in virtnet_xdp_set()
5894 return -EOPNOTSUPP; in virtnet_xdp_set()
5897 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { in virtnet_xdp_set()
5899 return -EINVAL; in virtnet_xdp_set()
5902 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { in virtnet_xdp_set()
5904 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); in virtnet_xdp_set()
5905 return -EINVAL; in virtnet_xdp_set()
5908 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; in virtnet_xdp_set()
5913 if (curr_qp + xdp_qp > vi->max_queue_pairs) { in virtnet_xdp_set()
5915 curr_qp + xdp_qp, vi->max_queue_pairs); in virtnet_xdp_set()
5919 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); in virtnet_xdp_set()
5924 bpf_prog_add(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
5928 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5929 napi_disable(&vi->rq[i].napi); in virtnet_xdp_set()
5930 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_xdp_set()
5935 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5936 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
5947 vi->xdp_queue_pairs = xdp_qp; in virtnet_xdp_set()
5950 vi->xdp_enabled = true; in virtnet_xdp_set()
5951 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5952 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
5960 vi->xdp_enabled = false; in virtnet_xdp_set()
5963 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5967 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
5968 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
5969 &vi->sq[i].napi); in virtnet_xdp_set()
5978 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
5979 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); in virtnet_xdp_set()
5983 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
5984 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
5985 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
5986 &vi->sq[i].napi); in virtnet_xdp_set()
5990 bpf_prog_sub(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
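
The MTU check above comes from single-buffer XDP keeping the whole frame in one page: besides the payload, the page must hold the XDP headroom plus the tail reservation ("room") and the Ethernet header. A back-of-the-envelope sketch with assumed sizes (the real reserve also depends on struct skb_shared_info and alignment):

#include <stdio.h>

int main(void)
{
    unsigned int page_size = 4096;
    unsigned int xdp_headroom = 256;   /* XDP_PACKET_HEADROOM */
    unsigned int shinfo = 320;         /* assumed skb_shared_info footprint */
    unsigned int eth_hlen = 14;

    unsigned int room = xdp_headroom + shinfo;
    unsigned int max_sz = page_size - room - eth_hlen;

    printf("largest MTU usable with single-buffer XDP: %u\n", max_sz);
    return 0;
}
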
5996 switch (xdp->command) { in virtnet_xdp()
5998 return virtnet_xdp_set(dev, xdp->prog, xdp->extack); in virtnet_xdp()
6002 return -EINVAL; in virtnet_xdp()
6012 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_get_phys_port_name()
6013 return -EOPNOTSUPP; in virtnet_get_phys_port_name()
6017 return -EOPNOTSUPP; in virtnet_get_phys_port_name()
6029 if ((dev->features ^ features) & NETIF_F_GRO_HW) { in virtnet_set_features()
6030 if (vi->xdp_enabled) in virtnet_set_features()
6031 return -EBUSY; in virtnet_set_features()
6034 offloads = vi->guest_offloads_capable; in virtnet_set_features()
6036 offloads = vi->guest_offloads_capable & in virtnet_set_features()
6042 vi->guest_offloads = offloads; in virtnet_set_features()
6045 if ((dev->features ^ features) & NETIF_F_RXHASH) { in virtnet_set_features()
6047 vi->rss.hash_types = vi->rss_hash_types_saved; in virtnet_set_features()
6049 vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; in virtnet_set_features()
6052 return -EINVAL; in virtnet_set_features()
6061 struct send_queue *sq = &priv->sq[txqueue]; in virtnet_tx_timeout()
6064 u64_stats_update_begin(&sq->stats.syncp); in virtnet_tx_timeout()
6065 u64_stats_inc(&sq->stats.tx_timeouts); in virtnet_tx_timeout()
6066 u64_stats_update_end(&sq->stats.syncp); in virtnet_tx_timeout()
6069 txqueue, sq->name, sq->vq->index, sq->vq->name, in virtnet_tx_timeout()
6070 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); in virtnet_tx_timeout()
6080 ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags, in virtnet_init_irq_moder()
6087 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_init_irq_moder()
6088 net_dim_setting(vi->dev, &vi->rq[i].dim, false); in virtnet_init_irq_moder()
6095 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) in virtnet_free_irq_moder()
6099 net_dim_free_irq_moder(vi->dev); in virtnet_free_irq_moder()
6128 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
6133 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
6140 if (vi->status == v) in virtnet_config_changed_work()
6143 vi->status = v; in virtnet_config_changed_work()
6145 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
6147 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
6148 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
6150 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
6151 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
6157 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed()
6159 schedule_work(&vi->config_work); in virtnet_config_changed()
6166 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
6167 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
6168 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
6172 * we need to respect an RCU grace period before freeing vi->rq in virtnet_free_queues()
6176 kfree(vi->rq); in virtnet_free_queues()
6177 kfree(vi->sq); in virtnet_free_queues()
6178 kfree(vi->ctrl); in virtnet_free_queues()
6186 for (i = 0; i < vi->max_queue_pairs; i++) { in _free_receive_bufs()
6187 while (vi->rq[i].pages) in _free_receive_bufs()
6188 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in _free_receive_bufs()
6190 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); in _free_receive_bufs()
6191 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); in _free_receive_bufs()
6207 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
6208 if (vi->rq[i].alloc_frag.page) { in free_receive_page_frags()
6209 if (vi->rq[i].last_dma) in free_receive_page_frags()
6210 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); in free_receive_page_frags()
6211 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
6217 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf()
6221 sq = &vi->sq[i]; in virtnet_sq_free_unused_buf()
6234 xsk_tx_completed(sq->xsk_pool, 1); in virtnet_sq_free_unused_buf()
6241 struct virtnet_info *vi = vq->vdev->priv; in virtnet_sq_free_unused_buf_done()
6244 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in virtnet_sq_free_unused_buf_done()
6252 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6253 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
6259 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
6260 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
6270 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
6274 vdev->config->del_vqs(vdev); in virtnet_del_vqs()
6285 const unsigned int hdr_len = vi->hdr_len; in mergeable_min_buf_len()
6287 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; in mergeable_min_buf_len()
6291 return max(max(min_buf_len, hdr_len) - hdr_len, in mergeable_min_buf_len()
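
mergeable_min_buf_len() sizes each mergeable buffer so that a maximum-size packet scattered across every ring entry still fits, then clamps the result to a floor. A standalone version of that arithmetic with stand-in constants:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int hdr_len = 12, eth_hlen = 14, vlan_hlen = 4;
    unsigned int max_packet = 65535;      /* big_packets: IP_MAX_MTU */
    unsigned int ring_size = 256;         /* virtqueue_get_vring_size() */
    unsigned int good_packet_len = 1518;  /* stand-in for GOOD_PACKET_LEN */

    unsigned int buf_len = hdr_len + eth_hlen + vlan_hlen + max_packet;
    unsigned int min_buf_len = DIV_ROUND_UP(buf_len, ring_size);

    /* max(min_buf_len, hdr_len) - hdr_len, clamped to the floor */
    min_buf_len = (min_buf_len > hdr_len) ? min_buf_len - hdr_len : 0;
    if (min_buf_len < good_packet_len)
        min_buf_len = good_packet_len;

    printf("minimum mergeable buffer length: %u\n", min_buf_len);
    return 0;
}
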
6299 int ret = -ENOMEM; in virtnet_find_vqs()
6305 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by in virtnet_find_vqs()
6308 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
6309 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
6318 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_find_vqs()
6327 if (vi->has_cvq) { in virtnet_find_vqs()
6328 vqs_info[total_vqs - 1].name = "control"; in virtnet_find_vqs()
6332 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6335 sprintf(vi->rq[i].name, "input.%u", i); in virtnet_find_vqs()
6336 sprintf(vi->sq[i].name, "output.%u", i); in virtnet_find_vqs()
6337 vqs_info[rxq2vq(i)].name = vi->rq[i].name; in virtnet_find_vqs()
6338 vqs_info[txq2vq(i)].name = vi->sq[i].name; in virtnet_find_vqs()
6343 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL); in virtnet_find_vqs()
6347 if (vi->has_cvq) { in virtnet_find_vqs()
6348 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
6349 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
6350 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
6353 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
6354 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
6355 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); in virtnet_find_vqs()
6356 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
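
The vq numbering used above: the receive and transmit queues of each pair are interleaved (rxq2vq(i) = 2i, txq2vq(i) = 2i + 1), with the control queue last when present. A small sketch of that layout:

#include <stdio.h>

static int rxq2vq(int rxq) { return rxq * 2; }
static int txq2vq(int txq) { return txq * 2 + 1; }

int main(void)
{
    int max_queue_pairs = 3, has_cvq = 1;
    int total_vqs = max_queue_pairs * 2 + has_cvq;

    for (int i = 0; i < max_queue_pairs; i++)
        printf("pair %d: input.%d -> vq %d, output.%d -> vq %d\n",
               i, i, rxq2vq(i), i, txq2vq(i));
    if (has_cvq)
        printf("control -> vq %d (the last of %d)\n",
               total_vqs - 1, total_vqs);
    return 0;
}
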
6376 if (vi->has_cvq) { in virtnet_alloc_queues()
6377 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); in virtnet_alloc_queues()
6378 if (!vi->ctrl) in virtnet_alloc_queues()
6381 vi->ctrl = NULL; in virtnet_alloc_queues()
6383 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
6384 if (!vi->sq) in virtnet_alloc_queues()
6386 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); in virtnet_alloc_queues()
6387 if (!vi->rq) in virtnet_alloc_queues()
6390 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
6391 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
6392 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
6393 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
6395 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, in virtnet_alloc_queues()
6399 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
6400 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
6401 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
6403 u64_stats_init(&vi->rq[i].stats.syncp); in virtnet_alloc_queues()
6404 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
6405 mutex_init(&vi->rq[i].dim_lock); in virtnet_alloc_queues()
6411 kfree(vi->sq); in virtnet_alloc_queues()
6413 kfree(vi->ctrl); in virtnet_alloc_queues()
6415 return -ENOMEM; in virtnet_alloc_queues()
6447 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show()
6453 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
6454 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
6456 get_mergeable_buf_len(&vi->rq[queue_index], avg, in mergeable_rx_buffer_size_show()
6481 dev_err(&vdev->dev, "device advertises feature %s but not %s", in virtnet_fail_on_feature()
6521 if (!vdev->config->get) { in virtnet_validate()
6522 dev_err(&vdev->dev, "%s failure: config access disabled\n", in virtnet_validate()
6524 return -EINVAL; in virtnet_validate()
6528 return -EINVAL; in virtnet_validate()
6540 …dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, dis… in virtnet_validate()
6549 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_check_guest_gso()
6550 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_check_guest_gso()
6551 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_check_guest_gso()
6552 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_check_guest_gso()
6553 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) && in virtnet_check_guest_gso()
6554 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6)); in virtnet_check_guest_gso()
6566 vi->big_packets = true; in virtnet_set_big_packets()
6567 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE); in virtnet_set_big_packets()
6594 if (!(xdp->rxq->dev->features & NETIF_F_RXHASH)) in virtnet_xdp_rx_hash()
6595 return -ENODATA; in virtnet_xdp_rx_hash()
6597 vi = netdev_priv(xdp->rxq->dev); in virtnet_xdp_rx_hash()
6598 hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len); in virtnet_xdp_rx_hash()
6599 hash_report = __le16_to_cpu(hdr_hash->hash_report); in virtnet_xdp_rx_hash()
6605 *hash = __le32_to_cpu(hdr_hash->hash_value); in virtnet_xdp_rx_hash()
6615 int i, err = -ENOMEM; in virtnet_probe()
6636 return -ENOMEM; in virtnet_probe()
6639 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | in virtnet_probe()
6641 dev->netdev_ops = &virtnet_netdev; in virtnet_probe()
6642 dev->stat_ops = &virtnet_stat_ops; in virtnet_probe()
6643 dev->features = NETIF_F_HIGHDMA; in virtnet_probe()
6645 dev->ethtool_ops = &virtnet_ethtool_ops; in virtnet_probe()
6646 SET_NETDEV_DEV(dev, &vdev->dev); in virtnet_probe()
6651 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; in virtnet_probe()
6653 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; in virtnet_probe()
6656 dev->hw_features |= NETIF_F_TSO in virtnet_probe()
6661 dev->hw_features |= NETIF_F_TSO; in virtnet_probe()
6663 dev->hw_features |= NETIF_F_TSO6; in virtnet_probe()
6665 dev->hw_features |= NETIF_F_TSO_ECN; in virtnet_probe()
6667 dev->hw_features |= NETIF_F_GSO_UDP_L4; in virtnet_probe()
6669 dev->features |= NETIF_F_GSO_ROBUST; in virtnet_probe()
6672 dev->features |= dev->hw_features & NETIF_F_ALL_TSO; in virtnet_probe()
6683 dev->features |= NETIF_F_RXCSUM; in virtnet_probe()
6687 dev->features |= NETIF_F_GRO_HW; in virtnet_probe()
6689 dev->hw_features |= NETIF_F_GRO_HW; in virtnet_probe()
6691 dev->vlan_features = dev->features; in virtnet_probe()
6692 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in virtnet_probe()
6695 /* MTU range: 68 - 65535 */ in virtnet_probe()
6696 dev->min_mtu = MIN_MTU; in virtnet_probe()
6697 dev->max_mtu = MAX_MTU; in virtnet_probe()
6709 dev_info(&vdev->dev, "Assigned random MAC address %pM\n", in virtnet_probe()
6710 dev->dev_addr); in virtnet_probe()
6713 /* Set up our device-specific information */ in virtnet_probe()
6715 vi->dev = dev; in virtnet_probe()
6716 vi->vdev = vdev; in virtnet_probe()
6717 vdev->priv = vi; in virtnet_probe()
6719 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
6720 INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); in virtnet_probe()
6721 spin_lock_init(&vi->refill_lock); in virtnet_probe()
6724 vi->mergeable_rx_bufs = true; in virtnet_probe()
6725 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG; in virtnet_probe()
6729 vi->has_rss_hash_report = true; in virtnet_probe()
6732 vi->has_rss = true; in virtnet_probe()
6734 vi->rss_indir_table_size = in virtnet_probe()
6738 err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size); in virtnet_probe()
6742 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6743 vi->rss_key_size = in virtnet_probe()
6745 if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) { in virtnet_probe()
6746 dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n", in virtnet_probe()
6747 vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE); in virtnet_probe()
6748 err = -EINVAL; in virtnet_probe()
6752 vi->rss_hash_types_supported = in virtnet_probe()
6754 vi->rss_hash_types_supported &= in virtnet_probe()
6759 dev->hw_features |= NETIF_F_RXHASH; in virtnet_probe()
6760 dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops; in virtnet_probe()
6763 if (vi->has_rss_hash_report) in virtnet_probe()
6764 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash); in virtnet_probe()
6767 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
6769 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
6773 vi->any_header_sg = true; in virtnet_probe()
6776 vi->has_cvq = true; in virtnet_probe()
6778 mutex_init(&vi->cvq_lock); in virtnet_probe()
6784 if (mtu < dev->min_mtu) { in virtnet_probe()
6788 dev_err(&vdev->dev, in virtnet_probe()
6790 mtu, dev->min_mtu); in virtnet_probe()
6791 err = -EINVAL; in virtnet_probe()
6795 dev->mtu = mtu; in virtnet_probe()
6796 dev->max_mtu = mtu; in virtnet_probe()
6801 if (vi->any_header_sg) in virtnet_probe()
6802 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
6806 vi->curr_queue_pairs = max_queue_pairs; in virtnet_probe()
6808 vi->curr_queue_pairs = num_online_cpus(); in virtnet_probe()
6809 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
6816 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { in virtnet_probe()
6817 vi->intr_coal_rx.max_usecs = 0; in virtnet_probe()
6818 vi->intr_coal_tx.max_usecs = 0; in virtnet_probe()
6819 vi->intr_coal_rx.max_packets = 0; in virtnet_probe()
6824 if (vi->sq[0].napi.weight) in virtnet_probe()
6825 vi->intr_coal_tx.max_packets = 1; in virtnet_probe()
6827 vi->intr_coal_tx.max_packets = 0; in virtnet_probe()
6830 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { in virtnet_probe()
6832 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_probe()
6833 if (vi->sq[i].napi.weight) in virtnet_probe()
6834 vi->sq[i].intr_coal.max_packets = 1; in virtnet_probe()
6842 if (vi->mergeable_rx_bufs) in virtnet_probe()
6843 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; in virtnet_probe()
6845 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
6846 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
6851 vi->failover = net_failover_create(vi->dev); in virtnet_probe()
6852 if (IS_ERR(vi->failover)) { in virtnet_probe()
6853 err = PTR_ERR(vi->failover); in virtnet_probe()
6858 if (vi->has_rss || vi->has_rss_hash_report) in virtnet_probe()
6874 virtio_config_driver_disable(vi->vdev); in virtnet_probe()
6878 if (vi->has_rss || vi->has_rss_hash_report) { in virtnet_probe()
6880 dev_warn(&vdev->dev, "RSS disabled because committing failed.\n"); in virtnet_probe()
6881 dev->hw_features &= ~NETIF_F_RXHASH; in virtnet_probe()
6882 vi->has_rss_hash_report = false; in virtnet_probe()
6883 vi->has_rss = false; in virtnet_probe()
6887 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_probe()
6894 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { in virtnet_probe()
6897 sg_init_one(&sg, dev->dev_addr, dev->addr_len); in virtnet_probe()
6902 err = -EINVAL; in virtnet_probe()
6907 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) { in virtnet_probe()
6915 err = -ENOMEM; in virtnet_probe()
6926 err = -EINVAL; in virtnet_probe()
6930 v = stats_cap->supported_stats_types[0]; in virtnet_probe()
6931 vi->device_stats_cap = le64_to_cpu(v); in virtnet_probe()
6937 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
6938 virtnet_config_changed_work(&vi->config_work); in virtnet_probe()
6940 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
6946 if (virtio_has_feature(vi->vdev, guest_offloads[i])) in virtnet_probe()
6947 set_bit(guest_offloads[i], &vi->guest_offloads); in virtnet_probe()
6948 vi->guest_offloads_capable = vi->guest_offloads; in virtnet_probe()
6959 dev->name, max_queue_pairs); in virtnet_probe()
6966 net_failover_destroy(vi->failover); in virtnet_probe()
6969 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
6981 virtio_reset_device(vi->vdev); in remove_vq_common()
6990 for (i = 0; i < vi->max_queue_pairs; i++) in remove_vq_common()
6991 netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i)); in remove_vq_common()
7002 struct virtnet_info *vi = vdev->priv; in virtnet_remove()
7007 flush_work(&vi->config_work); in virtnet_remove()
7009 flush_work(&vi->rx_mode_work); in virtnet_remove()
7013 unregister_netdev(vi->dev); in virtnet_remove()
7015 net_failover_destroy(vi->failover); in virtnet_remove()
7019 rss_indirection_table_free(&vi->rss); in virtnet_remove()
7021 free_netdev(vi->dev); in virtnet_remove()
7026 struct virtnet_info *vi = vdev->priv; in virtnet_freeze()
7037 struct virtnet_info *vi = vdev->priv; in virtnet_restore()
7043 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()