xref: /linux/drivers/net/ethernet/intel/igc/igc_xdp.c (revision eec8359f0797ef87c6ef6cbed6de08b02073b833)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>

#include "igc.h"
#include "igc_xdp.h"

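/* igc_xdp_set_prog - attach or detach an XDP program
 *
 * Reached from the driver's ndo_bpf callback for the XDP_SETUP_PROG
 * command. The interface is restarted only when the XDP state actually
 * flips between enabled and disabled, since that changes the Rx buffer
 * setup; replacing one program with another is done without a reset.
 */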
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
		     struct netlink_ext_ack *extack)
{
	struct net_device *dev = adapter->netdev;
	bool if_running = netif_running(dev);
	struct bpf_prog *old_prog;
	bool need_update;

	if (dev->mtu > ETH_DATA_LEN) {
		/* For now, the driver doesn't support XDP with jumbo
		 * frames, so we return an error.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
		return -EOPNOTSUPP;
	}

	need_update = !!adapter->xdp_prog != !!prog;
	if (if_running && need_update)
		igc_close(dev);

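	/* xchg() publishes the new program atomically, so the datapath
	 * never observes a torn pointer; the previous program's reference
	 * is dropped afterwards.
	 */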
	old_prog = xchg(&adapter->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

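	/* Advertise the device as a valid XDP_REDIRECT target only while
	 * a program is installed.
	 */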
	if (prog)
		xdp_features_set_redirect_target(dev, true);
	else
		xdp_features_clear_redirect_target(dev);

	if (if_running && need_update)
		igc_open(dev);

	return 0;
}

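/* igc_xdp_enable_pool - bind an AF_XDP buffer pool to a queue pair for
 * zero-copy operation. If the interface is already running with XDP
 * enabled, the queue's rings are quiesced, flagged for zero-copy, and
 * restarted; otherwise the rings are simply reflagged.
 */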
static int igc_xdp_enable_pool(struct igc_adapter *adapter,
			       struct xsk_buff_pool *pool, u16 queue_id)
{
	struct net_device *ndev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	struct igc_ring *rx_ring, *tx_ring;
	struct napi_struct *napi;
	bool needs_reset;
	u32 frame_size;
	int err;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	frame_size = xsk_pool_get_rx_frame_size(pool);
	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
		/* When XDP is enabled, the driver doesn't support frames
		 * that span multiple buffers. To avoid that, check that the
		 * xsk frame size can fit the maximum ethernet frame plus
		 * double VLAN tagging: 1514 + 2 * 4 = 1522 bytes.
		 */
		return -EOPNOTSUPP;
	}

	err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
	if (err) {
		netdev_err(ndev, "Failed to map xsk pool\n");
		return err;
	}

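	/* A live quiesce/restart is only needed when the interface is up
	 * and XDP is active; otherwise the rings aren't running and can be
	 * reflagged directly.
	 */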
	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

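	/* Detach the queue from the napi mapping and flag both rings so
	 * the datapath services them from the xsk pool.
	 */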
	igc_set_queue_napi(adapter, queue_id, NULL);
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);

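		/* Kick napi once so the Rx ring starts getting filled from
		 * the freshly mapped pool.
		 */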
		err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
		if (err) {
			xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

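/* igc_xdp_disable_pool - unbind the AF_XDP buffer pool from a queue pair,
 * restoring copy-mode operation. Mirrors igc_xdp_enable_pool(): quiesce if
 * needed, unmap the pool, clear the zero-copy flags, restore the napi
 * mapping, and restart the rings.
 */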
static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
{
	struct igc_ring *rx_ring, *tx_ring;
	struct xsk_buff_pool *pool;
	struct napi_struct *napi;
	bool needs_reset;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

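	/* Look up the pool previously bound to this queue; bail out if
	 * zero-copy was never enabled on it.
	 */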
	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
	if (!pool)
		return -EINVAL;

	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

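	/* Release the DMA mappings, drop the zero-copy ring flags, and
	 * restore the queue's regular napi association.
	 */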
	xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
	igc_set_queue_napi(adapter, queue_id, napi);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);
	}

	return 0;
}

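/* igc_xdp_setup_pool - entry point for the XDP_SETUP_XSK_POOL command from
 * the driver's ndo_bpf callback: a non-NULL pool enables zero-copy on the
 * queue, a NULL pool disables it.
 */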
int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
		       u16 queue_id)
{
	return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
		      igc_xdp_disable_pool(adapter, queue_id);
}