// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>

#include "igc.h"
#include "igc_xdp.h"
9
igc_xdp_set_prog(struct igc_adapter * adapter,struct bpf_prog * prog,struct netlink_ext_ack * extack)10 int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
11 struct netlink_ext_ack *extack)
12 {
13 struct net_device *dev = adapter->netdev;
14 bool if_running = netif_running(dev);
15 struct bpf_prog *old_prog;
16 bool need_update;
17 unsigned int i;
18
19 if (dev->mtu > ETH_DATA_LEN) {
20 /* For now, the driver doesn't support XDP functionality with
21 * jumbo frames so we return error.
22 */
23 NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
24 return -EOPNOTSUPP;
25 }
26
27 need_update = !!adapter->xdp_prog != !!prog;
28 if (if_running && need_update) {
29 for (i = 0; i < adapter->num_rx_queues; i++) {
30 igc_disable_rx_ring(adapter->rx_ring[i]);
31 igc_disable_tx_ring(adapter->tx_ring[i]);
32 napi_disable(&adapter->rx_ring[i]->q_vector->napi);
33 }
34 }
35
36 old_prog = xchg(&adapter->xdp_prog, prog);
37 if (old_prog)
38 bpf_prog_put(old_prog);
39
40 if (prog)
41 xdp_features_set_redirect_target(dev, true);
42 else
43 xdp_features_clear_redirect_target(dev);
44
45 if (if_running && need_update) {
46 for (i = 0; i < adapter->num_rx_queues; i++) {
47 napi_enable(&adapter->rx_ring[i]->q_vector->napi);
48 igc_enable_tx_ring(adapter->tx_ring[i]);
49 igc_enable_rx_ring(adapter->rx_ring[i]);
50 }
51 }
52
53 return 0;
54 }
/* igc_xdp_enable_pool - attach an AF_XDP buffer pool to a queue pair
 * @adapter: board private structure
 * @pool: xsk buffer pool to DMA-map and attach
 * @queue_id: index of the Rx/Tx ring pair to run in zero-copy mode
 *
 * DMA-maps the pool, marks both rings of the pair as AF_XDP zero-copy,
 * and — if traffic was flowing with XDP enabled — quiesces and restarts
 * the rings around the flag change.
 *
 * Returns 0 on success or a negative errno.
 */
static int igc_xdp_enable_pool(struct igc_adapter *adapter,
			       struct xsk_buff_pool *pool, u16 queue_id)
{
	struct net_device *ndev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	struct igc_ring *rx_ring, *tx_ring;
	struct napi_struct *napi;
	bool needs_reset;
	u32 frame_size;
	int err;

	/* The queue index addresses both an Rx and a Tx ring. */
	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	frame_size = xsk_pool_get_rx_frame_size(pool);
	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
		/* When XDP is enabled, the driver doesn't support frames that
		 * span over multiple buffers. To avoid that, we check if xsk
		 * frame size is big enough to fit the max ethernet frame size
		 * + vlan double tagging.
		 */
		return -EOPNOTSUPP;
	}

	err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
	if (err) {
		netdev_err(ndev, "Failed to map xsk pool\n");
		return err;
	}

	/* Rings only carry traffic that must be paused when the interface
	 * is up and an XDP program is already attached.
	 */
	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

	/* Flag both rings as zero-copy; the ring setup paths key off this. */
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);

		/* Kick the Rx path once so the freshly attached pool gets
		 * polled without waiting for new traffic.
		 */
		err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
		if (err) {
			/* NOTE(review): on wakeup failure the pool is unmapped
			 * but the AF_XDP_ZC ring flags remain set and the rings
			 * stay enabled — confirm the xsk core's error unwind
			 * (or a later disable_pool call) clears this state.
			 */
			xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}
/* igc_xdp_disable_pool - detach the AF_XDP buffer pool from a queue pair
 * @adapter: board private structure
 * @queue_id: index of the Rx/Tx ring pair leaving zero-copy mode
 *
 * Returns 0 on success, -EINVAL if the queue index is out of range or no
 * pool is attached to it.
 */
static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
{
	struct igc_ring *rxr, *txr;
	struct xsk_buff_pool *pool;
	struct napi_struct *napi;
	bool restart;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
	if (!pool)
		return -EINVAL;

	/* Only pause traffic when the interface is up and XDP is active. */
	restart = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rxr = adapter->rx_ring[queue_id];
	txr = adapter->tx_ring[queue_id];
	/* Both rings of the pair are serviced by the Rx ring's napi. */
	napi = &rxr->q_vector->napi;

	if (restart) {
		igc_disable_rx_ring(rxr);
		igc_disable_tx_ring(txr);
		napi_disable(napi);
	}

	xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rxr->flags);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &txr->flags);

	if (restart) {
		napi_enable(napi);
		igc_enable_rx_ring(rxr);
		igc_enable_tx_ring(txr);
	}

	return 0;
}
/* igc_xdp_setup_pool - attach or detach an AF_XDP buffer pool
 * @adapter: board private structure
 * @pool: pool to attach, or NULL to detach the one currently bound
 * @queue_id: index of the affected Rx/Tx ring pair
 *
 * Returns 0 on success or a negative errno.
 */
int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
		       u16 queue_id)
{
	if (pool)
		return igc_xdp_enable_pool(adapter, pool, queue_id);

	return igc_xdp_disable_pool(adapter, queue_id);
}