xref: /linux/include/net/xsk_buff_pool.h (revision 79ac11393328fb1717d17c12e3c0eef0e9fa0647)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

#define XSK_PRIV_MAX 24

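/* Per-buffer context for AF_XDP zero-copy buffers: wraps the generic xdp_buff
 * and keeps the state needed to recycle the buffer, i.e. its DMA addresses,
 * the owning pool and the original umem address it was allocated from. The
 * cb[] area gives ZC drivers up to XSK_PRIV_MAX bytes of private per-buffer
 * scratch space (see XSK_CHECK_PRIV_TYPE() below).
 */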
struct xdp_buff_xsk {
	struct xdp_buff xdp;
	u8 cb[XSK_PRIV_MAX];
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	u64 orig_addr;
	struct list_head free_list_node;
	struct list_head xskb_list_node;
};

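/* XSK_CHECK_PRIV_TYPE() lets a ZC driver assert at build time that its private
 * buffer container, which overlays struct xdp_buff_xsk, does not spill past
 * the cb[] area. A minimal sketch from driver code, using a hypothetical
 * struct my_drv_xdp_buff (the invocation has to sit inside a function, since
 * BUILD_BUG_ON() expands to a statement):
 *
 *	struct my_drv_xdp_buff {
 *		struct xdp_buff xdp;		// must be first
 *		struct my_drv_rx_desc *desc;	// private state kept in cb[]
 *	};
 *
 *	XSK_CHECK_PRIV_TYPE(struct my_drv_xdp_buff);
 */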
#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))

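/* One DMA mapping of a umem's pages for a given device. Reference counted so
 * that pools which share the same umem and device can reuse the mapping
 * instead of mapping the pages again.
 */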
struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
	bool dma_need_sync;
};

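/* The buffer pool itself: one instance per umem, device and queue id binding,
 * possibly shared by several sockets. Control path members come first; the hot
 * data path members start at the cacheline-aligned fq pointer, with the
 * free_heads[] stack placed last.
 */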
struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	struct list_head xskb_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members, kept as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u8 tx_metadata_len; /* inherited from umem */
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	bool tx_sw_csum;
	void *addrs;
	/* Mutual exclusion of the completion ring in SKB mode. Two cases to protect:
	 * the NAPI TX thread and the sendmsg error paths in the SKB destructor
	 * callback, and sockets sharing a single cq because the same netdev and
	 * queue id are shared.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
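/* When the bit is set in dma_pages[i], page i + 1 is DMA-contiguous with page
 * i, so an unaligned buffer crossing that page boundary can still be treated
 * as one contiguous frame.
 */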

/* AF_XDP core. */
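/* Rough control path lifecycle: a pool is created with
 * xp_create_and_assign_umem() when a socket binds, attached to a device and
 * queue with xp_assign_dev() (or xp_assign_dev_shared() for shared umems),
 * reference counted via xp_get_pool()/xp_put_pool(), and torn down with
 * xp_clear_dev() and xp_destroy().
 */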
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

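/* Pre-compute a buffer's virtual and DMA addresses at setup time so the
 * allocation fast path does not have to redo the arithmetic per packet.
 */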
static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}

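/* Worked example of the math above, assuming 4 KiB pages and addr = 0x3100:
 * the page base comes from dma_pages[3] (contiguity bit masked off), the
 * in-page offset is 0x100, so frame_dma = base + 0x100, and dma points a
 * further pool->headroom + XDP_PACKET_HEADROOM bytes in, at the start of
 * packet data.
 */
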
/* AF_XDP ZC drivers, via xdp_sock_drv.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
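
/* dma points at the start of packet data (after any headroom), frame_dma at
 * the start of the whole frame; both were precomputed in xp_init_xskb_dma().
 */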
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	xp_dma_sync_for_cpu_slow(xskb);
}

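/* Syncing for the device only falls through to the out-of-line slow path when
 * the mapping actually needs it (pool->dma_need_sync); otherwise the inline
 * check turns the call into a no-op.
 */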
void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}

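/* True if a descriptor of len bytes starting at addr would straddle two umem
 * pages that are not contiguous in DMA space. Only possible in unaligned mode,
 * where buffers may cross page boundaries.
 */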
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	return pool->dma_pages &&
	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}

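/* True if this descriptor is part of a multi-buffer packet and more fragments
 * follow (XDP_PKT_CONTD is set in the descriptor options).
 */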
static inline bool xp_mb_desc(struct xdp_desc *desc)
{
	return desc->options & XDP_PKT_CONTD;
}

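/* Ring address helpers. In aligned mode an address is the chunk base plus an
 * offset in the low bits. In unaligned mode the offset lives in the upper
 * 16 bits of the 64-bit address (XSK_UNALIGNED_BUF_OFFSET_SHIFT); the helpers
 * below split the two parts apart or fold them back together.
 */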
static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
		xp_unaligned_extract_offset(addr);
}

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

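/* Release a buffer's pool bookkeeping. Only unaligned mode keeps an explicit
 * free_heads stack to push back onto; in aligned mode the buffer is found
 * again via its chunk index, so nothing needs to be done here.
 */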
static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

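/* Convert a buffer back to the address format used on the rings: the current
 * offset of the data within the chunk (including pool headroom) is carried in
 * the low bits in aligned mode and in the upper 16 bits in unaligned mode.
 */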
static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
{
	return pool->tx_metadata_len > 0;
}

#endif /* XSK_BUFF_POOL_H_ */