xref: /linux/drivers/net/ethernet/intel/igc/igc_xdp.c (revision 68550cbc6129159b7a6434796b721e8b66ee12f6)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <net/xdp_sock_drv.h>

#include "igc.h"
#include "igc_xdp.h"

/* Install an XDP program on the interface, or remove the current one when
 * @prog is NULL. If the interface is running, it is closed and re-opened
 * around the swap so the rings are set up for the new program.
 */
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
		     struct netlink_ext_ack *extack)
{
	struct net_device *dev = adapter->netdev;
	bool if_running = netif_running(dev);
	struct bpf_prog *old_prog;

	if (dev->mtu > ETH_DATA_LEN) {
		/* For now, the driver doesn't support XDP functionality with
		 * jumbo frames, so we return an error.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
		return -EOPNOTSUPP;
	}

	if (if_running)
		igc_close(dev);

	old_prog = xchg(&adapter->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (if_running)
		igc_open(dev);

	return 0;
}
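/* For orientation: igc_xdp_set_prog() above is not called directly by
 * userspace. A loader attaches a program to the netdev and the core forwards
 * the request to the driver's .ndo_bpf hook with XDP_SETUP_PROG. A minimal
 * userspace sketch using a recent libbpf follows; the object file name,
 * interface name and flag choice are illustrative assumptions and error
 * handling is omitted:
 *
 *	#include <bpf/libbpf.h>
 *	#include <net/if.h>
 *	#include <linux/if_link.h>
 *
 *	struct bpf_object *obj = bpf_object__open_file("xdp_prog.o", NULL);
 *	struct bpf_program *prog;
 *	int ifindex, prog_fd;
 *
 *	bpf_object__load(obj);
 *	prog = bpf_object__next_program(obj, NULL);
 *	prog_fd = bpf_program__fd(prog);
 *	ifindex = if_nametoindex("eth0");
 *	bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_DRV_MODE, NULL);
 */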

/* Bind an AF_XDP buffer pool to the Rx/Tx ring pair selected by @queue_id and
 * flag the rings for zero-copy operation. If the interface is running with an
 * XDP program attached, the rings are quiesced around the switch.
 */
static int igc_xdp_enable_pool(struct igc_adapter *adapter,
			       struct xsk_buff_pool *pool, u16 queue_id)
{
	struct net_device *ndev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	struct igc_ring *rx_ring, *tx_ring;
	struct napi_struct *napi;
	bool needs_reset;
	u32 frame_size;
	int err;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	frame_size = xsk_pool_get_rx_frame_size(pool);
	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
		/* When XDP is enabled, the driver doesn't support frames that
		 * span multiple buffers. To avoid that, we check that the xsk
		 * frame size is big enough to fit a maximum-size ethernet
		 * frame plus double VLAN tagging, i.e. ETH_FRAME_LEN +
		 * 2 * VLAN_HLEN = 1514 + 8 = 1522 bytes.
		 */
		return -EOPNOTSUPP;
	}

	err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
	if (err) {
		netdev_err(ndev, "Failed to map xsk pool\n");
		return err;
	}

	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);

		err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
		if (err) {
			xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}
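/* The enable path above is driven from userspace by creating an AF_XDP socket
 * bound to this queue. A rough sketch using the xsk helpers from libxdp
 * (formerly part of libbpf); the interface name, NUM_FRAMES and FRAME_SIZE
 * are illustrative assumptions and error handling is omitted:
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <xdp/xsk.h>
 *
 *	struct xsk_umem *umem;
 *	struct xsk_socket *xsk;
 *	struct xsk_ring_prod fill, tx;
 *	struct xsk_ring_cons comp, rx;
 *	void *bufs;
 *
 *	posix_memalign(&bufs, getpagesize(), NUM_FRAMES * FRAME_SIZE);
 *	xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
 *			 &fill, &comp, NULL);
 *	xsk_socket__create(&xsk, "eth0", queue_id, umem, &rx, &tx, NULL);
 *
 * Binding the socket in zero-copy mode makes the core issue
 * XDP_SETUP_XSK_POOL for @queue_id, which lands in igc_xdp_setup_pool() and
 * then igc_xdp_enable_pool(). The pool's rx frame size (derived from the umem
 * chunk size) must pass the >= 1522 byte check above.
 */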

/* Unbind the AF_XDP buffer pool from the Rx/Tx ring pair selected by
 * @queue_id: the pool's DMA mappings are released and the zero-copy ring
 * flags are cleared, with the rings quiesced first if the interface is
 * running with an XDP program attached.
 */
static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
{
	struct igc_ring *rx_ring, *tx_ring;
	struct xsk_buff_pool *pool;
	struct napi_struct *napi;
	bool needs_reset;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
	if (!pool)
		return -EINVAL;

	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

	xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);
	}

	return 0;
}
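/* The disable path above mirrors igc_xdp_enable_pool(): the rings and their
 * napi context are quiesced first, so nothing can reference pool memory while
 * it is DMA-unmapped and the zero-copy flags are cleared. It is typically
 * reached when userspace tears the socket down, e.g. via xsk_socket__delete(),
 * which causes the core to invoke .ndo_bpf with XDP_SETUP_XSK_POOL and a NULL
 * pool.
 */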

int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
		       u16 queue_id)
{
	return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
		      igc_xdp_disable_pool(adapter, queue_id);
}
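/* For completeness, both entry points in this file are reached through the
 * driver's .ndo_bpf callback. A sketch of such a dispatcher is shown below;
 * the real one lives in igc_main.c and may differ in detail:
 *
 *	static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		struct igc_adapter *adapter = netdev_priv(dev);
 *
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
 *		case XDP_SETUP_XSK_POOL:
 *			return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
 *						  bpf->xsk.queue_id);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */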