// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>

#include "igc.h"
#include "igc_xdp.h"

/**
 * igc_xdp_set_prog - install or remove the adapter's XDP program
 * @adapter: board private structure
 * @prog: BPF program to attach, or NULL to detach the current one
 * @extack: netlink extended ack used to report errors to userspace
 *
 * Publishes @prog in adapter->xdp_prog, quiescing all queue pairs around
 * the swap only when the adapter actually transitions between the
 * "program attached" and "no program" states.
 *
 * Return: 0 on success, -EOPNOTSUPP when the current MTU exceeds
 * ETH_DATA_LEN (XDP with jumbo frames is not supported).
 */
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
		     struct netlink_ext_ack *extack)
{
	struct net_device *dev = adapter->netdev;
	bool if_running = netif_running(dev);
	struct bpf_prog *old_prog;
	bool need_update;
	unsigned int i;

	if (dev->mtu > ETH_DATA_LEN) {
		/* For now, the driver doesn't support XDP functionality with
		 * jumbo frames so we return error.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
		return -EOPNOTSUPP;
	}

	/* Rings only need to be stopped and restarted when toggling between
	 * having a program and having none; replacing one program with
	 * another is handled by the bare pointer exchange below.
	 */
	need_update = !!adapter->xdp_prog != !!prog;
	if (if_running && need_update) {
		/* NOTE(review): tx_ring[] is indexed by the rx queue count
		 * here, which assumes num_tx_queues >= num_rx_queues on this
		 * device - confirm against adapter queue setup.
		 */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			igc_disable_rx_ring(adapter->rx_ring[i]);
			igc_disable_tx_ring(adapter->tx_ring[i]);
			napi_disable(&adapter->rx_ring[i]->q_vector->napi);
		}
	}

	/* Atomically publish the new program and drop the reference held on
	 * the previous one, if any.
	 */
	old_prog = xchg(&adapter->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	/* Advertise (or stop advertising) the device as a valid XDP_REDIRECT
	 * target, depending on whether a program is now loaded.
	 */
	if (prog)
		xdp_features_set_redirect_target(dev, true);
	else
		xdp_features_clear_redirect_target(dev);

	if (if_running && need_update) {
		/* Bring each queue pair back up in the reverse order of the
		 * teardown above: napi first, then tx, then rx.
		 */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			napi_enable(&adapter->rx_ring[i]->q_vector->napi);
			igc_enable_tx_ring(adapter->tx_ring[i]);
			igc_enable_rx_ring(adapter->rx_ring[i]);
		}
	}

	return 0;
}

/**
 * igc_xdp_enable_pool - enable AF_XDP zero-copy on one queue pair
 * @adapter: board private structure
 * @pool: XSK buffer pool to bind to the queue pair
 * @queue_id: index of the rx/tx queue pair to bind
 *
 * DMA-maps @pool, flags both rings of the pair as zero-copy and, when the
 * interface is running with XDP enabled, restarts the rings around the
 * reconfiguration and kicks the rx path so it starts consuming pool
 * buffers.
 *
 * Return: 0 on success, -EINVAL for an out-of-range queue id, -EOPNOTSUPP
 * when the pool's frame size is too small for a full-size frame, or a
 * negative errno from DMA mapping / wakeup.
 */
static int igc_xdp_enable_pool(struct igc_adapter *adapter,
			       struct xsk_buff_pool *pool, u16 queue_id)
{
	struct net_device *ndev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	struct igc_ring *rx_ring, *tx_ring;
	struct napi_struct *napi;
	bool needs_reset;
	u32 frame_size;
	int err;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	frame_size = xsk_pool_get_rx_frame_size(pool);
	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
		/* When XDP is enabled, the driver doesn't support frames that
		 * span over multiple buffers. To avoid that, we check if xsk
		 * frame size is big enough to fit the max ethernet frame size
		 * + vlan double tagging.
		 */
		return -EOPNOTSUPP;
	}

	err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
	if (err) {
		netdev_err(ndev, "Failed to map xsk pool\n");
		return err;
	}

	/* Only quiesce/restart the rings when they are actually live, i.e.
	 * the interface is up and XDP is in use.
	 */
	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

	/* Detach the queue's napi association while in zero-copy mode;
	 * presumably the ZC datapath does not use it the same way - the
	 * counterpart in igc_xdp_disable_pool() restores it.
	 */
	igc_set_queue_napi(adapter, queue_id, NULL);
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);

		/* Kick the rx path so it begins pulling buffers from the
		 * freshly attached pool.
		 */
		err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
		if (err) {
			/* NOTE(review): this unwind unmaps the pool but
			 * leaves IGC_RING_FLAG_AF_XDP_ZC set and the queue
			 * napi cleared - verify callers fully tear down the
			 * queue state after this failure.
			 */
			xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

/**
 * igc_xdp_disable_pool - disable AF_XDP zero-copy on one queue pair
 * @adapter: board private structure
 * @queue_id: index of the rx/tx queue pair to unbind
 *
 * Reverse of igc_xdp_enable_pool(): DMA-unmaps the pool bound to
 * @queue_id, clears the zero-copy ring flags and restores the queue's
 * napi association, restarting the rings around the change when they
 * are live.
 *
 * Return: 0 on success, -EINVAL for an out-of-range queue id or when no
 * pool is bound to @queue_id.
 */
static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
{
	struct igc_ring *rx_ring, *tx_ring;
	struct xsk_buff_pool *pool;
	struct napi_struct *napi;
	bool needs_reset;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
	if (!pool)
		return -EINVAL;

	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

	xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
	/* Re-establish the napi association cleared when the pool was
	 * enabled.
	 */
	igc_set_queue_napi(adapter, queue_id, napi);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);
	}

	return 0;
}

/**
 * igc_xdp_setup_pool - ndo-level entry for XSK pool setup
 * @adapter: board private structure
 * @pool: pool to attach, or NULL to detach the current one
 * @queue_id: queue pair the request targets
 *
 * Dispatches to the enable or disable path based on whether @pool is
 * provided.
 *
 * Return: 0 on success or a negative errno from the chosen path.
 */
int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
		       u16 queue_id)
{
	return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
		      igc_xdp_disable_pool(adapter, queue_id);
}