xref: /linux/drivers/net/ethernet/intel/igb/igb_xsk.c (revision 80f6ccf9f1160ba26cfa4bf90f3cced6f2d12268)
1*80f6ccf9SSriram Yagnaraman // SPDX-License-Identifier: GPL-2.0
2*80f6ccf9SSriram Yagnaraman /* Copyright(c) 2018 Intel Corporation. */
3*80f6ccf9SSriram Yagnaraman 
4*80f6ccf9SSriram Yagnaraman #include <linux/bpf_trace.h>
5*80f6ccf9SSriram Yagnaraman #include <net/xdp_sock_drv.h>
6*80f6ccf9SSriram Yagnaraman #include <net/xdp.h>
7*80f6ccf9SSriram Yagnaraman 
8*80f6ccf9SSriram Yagnaraman #include "e1000_hw.h"
9*80f6ccf9SSriram Yagnaraman #include "igb.h"
10*80f6ccf9SSriram Yagnaraman 
11*80f6ccf9SSriram Yagnaraman static int igb_realloc_rx_buffer_info(struct igb_ring *ring, bool pool_present)
12*80f6ccf9SSriram Yagnaraman {
13*80f6ccf9SSriram Yagnaraman 	int size = pool_present ?
14*80f6ccf9SSriram Yagnaraman 		sizeof(*ring->rx_buffer_info_zc) * ring->count :
15*80f6ccf9SSriram Yagnaraman 		sizeof(*ring->rx_buffer_info) * ring->count;
16*80f6ccf9SSriram Yagnaraman 	void *buff_info = vmalloc(size);
17*80f6ccf9SSriram Yagnaraman 
18*80f6ccf9SSriram Yagnaraman 	if (!buff_info)
19*80f6ccf9SSriram Yagnaraman 		return -ENOMEM;
20*80f6ccf9SSriram Yagnaraman 
21*80f6ccf9SSriram Yagnaraman 	if (pool_present) {
22*80f6ccf9SSriram Yagnaraman 		vfree(ring->rx_buffer_info);
23*80f6ccf9SSriram Yagnaraman 		ring->rx_buffer_info = NULL;
24*80f6ccf9SSriram Yagnaraman 		ring->rx_buffer_info_zc = buff_info;
25*80f6ccf9SSriram Yagnaraman 	} else {
26*80f6ccf9SSriram Yagnaraman 		vfree(ring->rx_buffer_info_zc);
27*80f6ccf9SSriram Yagnaraman 		ring->rx_buffer_info_zc = NULL;
28*80f6ccf9SSriram Yagnaraman 		ring->rx_buffer_info = buff_info;
29*80f6ccf9SSriram Yagnaraman 	}
30*80f6ccf9SSriram Yagnaraman 
31*80f6ccf9SSriram Yagnaraman 	return 0;
32*80f6ccf9SSriram Yagnaraman }
33*80f6ccf9SSriram Yagnaraman 
/* Quiesce the Tx/Rx queue pair @qid so its buffer layout can be changed
 * safely (e.g. while attaching or detaching an XSK pool).
 *
 * Ordering matters: stop software Tx first, then the hardware queues,
 * then wait out in-flight readers before touching ring memory.
 */
static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid)
{
	struct igb_ring *tx_ring = adapter->tx_ring[qid];
	struct igb_ring *rx_ring = adapter->rx_ring[qid];
	struct e1000_hw *hw = &adapter->hw;

	/* Flag checked by igb_xsk_wakeup() (and presumably the Tx paths)
	 * so new transmissions stop targeting this ring.
	 */
	set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);

	/* Disable the hardware queues by clearing their descriptor
	 * control registers.
	 */
	wr32(E1000_TXDCTL(tx_ring->reg_idx), 0);
	wr32(E1000_RXDCTL(rx_ring->reg_idx), 0);

	/* Wait for in-flight softirq/RCU readers to drain before the
	 * rings are cleaned below.
	 */
	synchronize_net();

	/* Rx/Tx share the same napi context. */
	napi_disable(&rx_ring->q_vector->napi);

	/* Release all buffers still held by the rings. */
	igb_clean_tx_ring(tx_ring);
	igb_clean_rx_ring(rx_ring);

	/* Restart the queue pair with fresh statistics. */
	memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
	memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
}
56*80f6ccf9SSriram Yagnaraman 
/* Re-arm the Tx/Rx queue pair @qid after igb_txrx_ring_disable():
 * reprogram the hardware rings, refill Rx buffers and restart NAPI.
 */
static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid)
{
	struct igb_ring *tx_ring = adapter->tx_ring[qid];
	struct igb_ring *rx_ring = adapter->rx_ring[qid];

	igb_configure_tx_ring(adapter, tx_ring);
	igb_configure_rx_ring(adapter, rx_ring);

	synchronize_net();

	/* Allow the Tx paths to use this ring again. */
	clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));

	/* Rx/Tx share the same napi context. */
	napi_enable(&rx_ring->q_vector->napi);
}
78*80f6ccf9SSriram Yagnaraman 
79*80f6ccf9SSriram Yagnaraman struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
80*80f6ccf9SSriram Yagnaraman 				   struct igb_ring *ring)
81*80f6ccf9SSriram Yagnaraman {
82*80f6ccf9SSriram Yagnaraman 	int qid = ring->queue_index;
83*80f6ccf9SSriram Yagnaraman 	struct xsk_buff_pool *pool;
84*80f6ccf9SSriram Yagnaraman 
85*80f6ccf9SSriram Yagnaraman 	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
86*80f6ccf9SSriram Yagnaraman 
87*80f6ccf9SSriram Yagnaraman 	if (!igb_xdp_is_enabled(adapter))
88*80f6ccf9SSriram Yagnaraman 		return NULL;
89*80f6ccf9SSriram Yagnaraman 
90*80f6ccf9SSriram Yagnaraman 	return (pool && pool->dev) ? pool : NULL;
91*80f6ccf9SSriram Yagnaraman }
92*80f6ccf9SSriram Yagnaraman 
/* Attach @pool to Rx/Tx queue pair @qid for zero-copy AF_XDP.
 *
 * Returns 0 on success or a negative errno: -EINVAL for an out-of-range
 * qid, or the error from DMA mapping, buffer-info reallocation, or the
 * initial wakeup.
 */
static int igb_xsk_pool_enable(struct igb_adapter *adapter,
			       struct xsk_buff_pool *pool,
			       u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	struct igb_ring *rx_ring;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	/* qid addresses both an Rx and a Tx queue; both must exist. */
	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	/* Map the pool's umem for device DMA before the ring uses it. */
	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
	if (err)
		return err;

	rx_ring = adapter->rx_ring[qid];
	if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
	/* Quiesce the queue pair before swapping its buffer layout. */
	if (if_running)
		igb_txrx_ring_disable(adapter, qid);

	if (if_running) {
		/* Switch the ring to the zero-copy buffer-info layout. */
		err = igb_realloc_rx_buffer_info(rx_ring, true);
		if (!err) {
			igb_txrx_ring_enable(adapter, qid);
			/* Kick start the NAPI context so that receiving will start */
			err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		}

		/* NOTE(review): on this error path the queue pair stays
		 * disabled (igb_txrx_ring_enable() is not reached when
		 * realloc fails) — confirm this is intended.
		 */
		if (err) {
			xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}
134*80f6ccf9SSriram Yagnaraman 
/* Detach the XSK pool currently bound to queue pair @qid and restore
 * the regular Rx buffer layout.
 *
 * Returns 0 on success, -EINVAL if no pool is bound to @qid, or the
 * error from reallocating the regular buffer-info array.
 */
static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
{
	struct xsk_buff_pool *pool;
	struct igb_ring *rx_ring;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
	if (!pool)
		return -EINVAL;

	rx_ring = adapter->rx_ring[qid];
	if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
	/* Quiesce the queue pair before tearing down the DMA mapping. */
	if (if_running)
		igb_txrx_ring_disable(adapter, qid);

	xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);

	if (if_running) {
		/* Back to the regular (non-ZC) buffer-info layout.
		 * NOTE(review): if this fails the queue pair is left
		 * disabled — confirm this is the intended error path.
		 */
		err = igb_realloc_rx_buffer_info(rx_ring, false);
		if (err)
			return err;

		igb_txrx_ring_enable(adapter, qid);
	}

	return 0;
}
163*80f6ccf9SSriram Yagnaraman 
164*80f6ccf9SSriram Yagnaraman int igb_xsk_pool_setup(struct igb_adapter *adapter,
165*80f6ccf9SSriram Yagnaraman 		       struct xsk_buff_pool *pool,
166*80f6ccf9SSriram Yagnaraman 		       u16 qid)
167*80f6ccf9SSriram Yagnaraman {
168*80f6ccf9SSriram Yagnaraman 	return pool ? igb_xsk_pool_enable(adapter, pool, qid) :
169*80f6ccf9SSriram Yagnaraman 		igb_xsk_pool_disable(adapter, qid);
170*80f6ccf9SSriram Yagnaraman }
171*80f6ccf9SSriram Yagnaraman 
172*80f6ccf9SSriram Yagnaraman int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
173*80f6ccf9SSriram Yagnaraman {
174*80f6ccf9SSriram Yagnaraman 	struct igb_adapter *adapter = netdev_priv(dev);
175*80f6ccf9SSriram Yagnaraman 	struct e1000_hw *hw = &adapter->hw;
176*80f6ccf9SSriram Yagnaraman 	struct igb_ring *ring;
177*80f6ccf9SSriram Yagnaraman 	u32 eics = 0;
178*80f6ccf9SSriram Yagnaraman 
179*80f6ccf9SSriram Yagnaraman 	if (test_bit(__IGB_DOWN, &adapter->state))
180*80f6ccf9SSriram Yagnaraman 		return -ENETDOWN;
181*80f6ccf9SSriram Yagnaraman 
182*80f6ccf9SSriram Yagnaraman 	if (!igb_xdp_is_enabled(adapter))
183*80f6ccf9SSriram Yagnaraman 		return -EINVAL;
184*80f6ccf9SSriram Yagnaraman 
185*80f6ccf9SSriram Yagnaraman 	if (qid >= adapter->num_tx_queues)
186*80f6ccf9SSriram Yagnaraman 		return -EINVAL;
187*80f6ccf9SSriram Yagnaraman 
188*80f6ccf9SSriram Yagnaraman 	ring = adapter->tx_ring[qid];
189*80f6ccf9SSriram Yagnaraman 
190*80f6ccf9SSriram Yagnaraman 	if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
191*80f6ccf9SSriram Yagnaraman 		return -ENETDOWN;
192*80f6ccf9SSriram Yagnaraman 
193*80f6ccf9SSriram Yagnaraman 	if (!READ_ONCE(ring->xsk_pool))
194*80f6ccf9SSriram Yagnaraman 		return -EINVAL;
195*80f6ccf9SSriram Yagnaraman 
196*80f6ccf9SSriram Yagnaraman 	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
197*80f6ccf9SSriram Yagnaraman 		/* Cause software interrupt */
198*80f6ccf9SSriram Yagnaraman 		if (adapter->flags & IGB_FLAG_HAS_MSIX) {
199*80f6ccf9SSriram Yagnaraman 			eics |= ring->q_vector->eims_value;
200*80f6ccf9SSriram Yagnaraman 			wr32(E1000_EICS, eics);
201*80f6ccf9SSriram Yagnaraman 		} else {
202*80f6ccf9SSriram Yagnaraman 			wr32(E1000_ICS, E1000_ICS_RXDMT0);
203*80f6ccf9SSriram Yagnaraman 		}
204*80f6ccf9SSriram Yagnaraman 	}
205*80f6ccf9SSriram Yagnaraman 
206*80f6ccf9SSriram Yagnaraman 	return 0;
207*80f6ccf9SSriram Yagnaraman }
208