xref: /linux/net/xdp/xdp_umem.c (revision 10a708c24a31ae1be1ea23d1c38da2691d1fd65c)
1 // SPDX-License-Identifier: GPL-2.0
2 /* XDP user-space packet buffer
3  * Copyright(c) 2018 Intel Corporation.
4  */
5 
6 #include <linux/init.h>
7 #include <linux/sched/mm.h>
8 #include <linux/sched/signal.h>
9 #include <linux/sched/task.h>
10 #include <linux/uaccess.h>
11 #include <linux/slab.h>
12 #include <linux/bpf.h>
13 #include <linux/mm.h>
14 #include <linux/netdevice.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/idr.h>
17 #include <linux/highmem.h>
18 
19 #include "xdp_umem.h"
20 #include "xsk_queue.h"
21 
22 #define XDP_UMEM_MIN_CHUNK_SIZE 2048
23 
24 static DEFINE_IDA(umem_ida);
25 
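/* Every socket bound to this umem is kept on umem->xsk_list. Writers
 * serialize with xsk_list_lock (IRQs disabled), while readers walk the
 * list under RCU, hence the _rcu list helpers below.
 */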
26 void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
27 {
28 	unsigned long flags;
29 
30 	spin_lock_irqsave(&umem->xsk_list_lock, flags);
31 	list_add_rcu(&xs->list, &umem->xsk_list);
32 	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
33 }
34 
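/* Unlink a socket from the umem. Concurrent RCU readers may still see
 * the entry until a grace period has elapsed.
 */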
35 void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
36 {
37 	unsigned long flags;
38 
39 	spin_lock_irqsave(&umem->xsk_list_lock, flags);
40 	list_del_rcu(&xs->list);
41 	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
42 }
43 
44 /* The umem is stored both in the _rx struct and the _tx struct as we do
 45  * not know if the device has more tx queues than rx, or vice versa.
 46  * This might also change at run time.
47  */
48 static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
49 			       u16 queue_id)
50 {
51 	if (queue_id >= max_t(unsigned int,
52 			      dev->real_num_rx_queues,
53 			      dev->real_num_tx_queues))
54 		return -EINVAL;
55 
56 	if (queue_id < dev->real_num_rx_queues)
57 		dev->_rx[queue_id].umem = umem;
58 	if (queue_id < dev->real_num_tx_queues)
59 		dev->_tx[queue_id].umem = umem;
60 
61 	return 0;
62 }
63 
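/* Return the umem currently registered on @queue_id of @dev, or NULL if
 * there is none, checking the rx ring first and then the tx ring, which
 * mirrors xdp_reg_umem_at_qid() above. Exported so that drivers can look
 * up the umem when (re)configuring a queue.
 */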
64 struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
65 				       u16 queue_id)
66 {
67 	if (queue_id < dev->real_num_rx_queues)
68 		return dev->_rx[queue_id].umem;
69 	if (queue_id < dev->real_num_tx_queues)
70 		return dev->_tx[queue_id].umem;
71 
72 	return NULL;
73 }
74 EXPORT_SYMBOL(xdp_get_umem_from_qid);
75 
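/* Drop the umem pointers for @queue_id again, in both the rx and tx
 * queue structures.
 */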
76 static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
77 {
78 	if (queue_id < dev->real_num_rx_queues)
79 		dev->_rx[queue_id].umem = NULL;
80 	if (queue_id < dev->real_num_tx_queues)
81 		dev->_tx[queue_id].umem = NULL;
82 }
83 
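/* Bind the umem to a specific device and queue, called with the rtnl
 * lock held. XDP_ZEROCOPY and XDP_COPY are mutually exclusive flags.
 * For copy mode, registering the umem on the queue is all that is
 * needed. Otherwise zero-copy is attempted through the driver's
 * ndo_bpf(XDP_SETUP_XSK_UMEM) hook; if the driver cannot do it, we fall
 * back to copy mode unless XDP_ZEROCOPY was explicitly requested, in
 * which case the registration is undone and the error returned.
 */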
84 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
85 			u16 queue_id, u16 flags)
86 {
87 	bool force_zc, force_copy;
88 	struct netdev_bpf bpf;
89 	int err = 0;
90 
91 	ASSERT_RTNL();
92 
93 	force_zc = flags & XDP_ZEROCOPY;
94 	force_copy = flags & XDP_COPY;
95 
96 	if (force_zc && force_copy)
97 		return -EINVAL;
98 
99 	if (xdp_get_umem_from_qid(dev, queue_id))
100 		return -EBUSY;
101 
102 	err = xdp_reg_umem_at_qid(dev, umem, queue_id);
103 	if (err)
104 		return err;
105 
106 	umem->dev = dev;
107 	umem->queue_id = queue_id;
108 
109 	dev_hold(dev);
110 
111 	if (force_copy)
112 		/* For copy-mode, we are done. */
113 		return 0;
114 
115 	if (!dev->netdev_ops->ndo_bpf ||
116 	    !dev->netdev_ops->ndo_xsk_async_xmit) {
117 		err = -EOPNOTSUPP;
118 		goto err_unreg_umem;
119 	}
120 
121 	bpf.command = XDP_SETUP_XSK_UMEM;
122 	bpf.xsk.umem = umem;
123 	bpf.xsk.queue_id = queue_id;
124 
125 	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
126 	if (err)
127 		goto err_unreg_umem;
128 
129 	umem->zc = true;
130 	return 0;
131 
132 err_unreg_umem:
133 	if (!force_zc)
134 		err = 0; /* fallback to copy mode */
135 	if (err)
136 		xdp_clear_umem_at_qid(dev, queue_id);
137 	return err;
138 }
139 
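/* Undo xdp_umem_assign_dev(): if zero-copy was enabled, tell the driver
 * to drop the umem via ndo_bpf(), then clear the queue pointers and
 * release the reference on the netdev. Must be called with the rtnl
 * lock held; a no-op if the umem was never bound to a device.
 */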
140 void xdp_umem_clear_dev(struct xdp_umem *umem)
141 {
142 	struct netdev_bpf bpf;
143 	int err;
144 
145 	ASSERT_RTNL();
146 
147 	if (!umem->dev)
148 		return;
149 
150 	if (umem->zc) {
151 		bpf.command = XDP_SETUP_XSK_UMEM;
152 		bpf.xsk.umem = NULL;
153 		bpf.xsk.queue_id = umem->queue_id;
154 
155 		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
156 
157 		if (err)
158 			WARN(1, "failed to disable umem!\n");
159 	}
160 
161 	xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
162 
163 	dev_put(umem->dev);
164 	umem->dev = NULL;
165 	umem->zc = false;
166 }
167 
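/* Undo the kmap() of each page done in xdp_umem_reg(). */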
168 static void xdp_umem_unmap_pages(struct xdp_umem *umem)
169 {
170 	unsigned int i;
171 
172 	for (i = 0; i < umem->npgs; i++)
173 		kunmap(umem->pgs[i]);
174 }
175 
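/* Mark the pages dirty and drop the references taken by
 * get_user_pages() in xdp_umem_pin_pages().
 */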
176 static void xdp_umem_unpin_pages(struct xdp_umem *umem)
177 {
178 	unsigned int i;
179 
180 	for (i = 0; i < umem->npgs; i++) {
181 		struct page *page = umem->pgs[i];
182 
183 		set_page_dirty_lock(page);
184 		put_page(page);
185 	}
186 
187 	kfree(umem->pgs);
188 	umem->pgs = NULL;
189 }
190 
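/* Return the pages to the user's RLIMIT_MEMLOCK accounting and drop the
 * uid reference taken in xdp_umem_account_pages().
 */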
191 static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
192 {
193 	if (umem->user) {
194 		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
195 		free_uid(umem->user);
196 	}
197 }
198 
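/* Final teardown, run once the last reference to the umem is gone:
 * detach from the device under the rtnl lock, give back the id, destroy
 * the fill and completion rings and any frame-reuse queue, and finally
 * unmap, unpin and unaccount the user pages.
 */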
199 static void xdp_umem_release(struct xdp_umem *umem)
200 {
201 	rtnl_lock();
202 	xdp_umem_clear_dev(umem);
203 	rtnl_unlock();
204 
205 	ida_simple_remove(&umem_ida, umem->id);
206 
207 	if (umem->fq) {
208 		xskq_destroy(umem->fq);
209 		umem->fq = NULL;
210 	}
211 
212 	if (umem->cq) {
213 		xskq_destroy(umem->cq);
214 		umem->cq = NULL;
215 	}
216 
217 	xsk_reuseq_destroy(umem);
218 
219 	xdp_umem_unmap_pages(umem);
220 	xdp_umem_unpin_pages(umem);
221 
222 	kfree(umem->pages);
223 	umem->pages = NULL;
224 
225 	xdp_umem_unaccount_pages(umem);
226 	kfree(umem);
227 }
228 
229 static void xdp_umem_release_deferred(struct work_struct *work)
230 {
231 	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
232 
233 	xdp_umem_release(umem);
234 }
235 
236 void xdp_get_umem(struct xdp_umem *umem)
237 {
238 	refcount_inc(&umem->users);
239 }
240 
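/* Drop a reference to the umem. The final put defers the actual release
 * to a workqueue, since tearing the umem down takes the rtnl lock and
 * may sleep, which is not safe in every context this can be called from.
 */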
241 void xdp_put_umem(struct xdp_umem *umem)
242 {
243 	if (!umem)
244 		return;
245 
246 	if (refcount_dec_and_test(&umem->users)) {
247 		INIT_WORK(&umem->work, xdp_umem_release_deferred);
248 		schedule_work(&umem->work);
249 	}
250 }
251 
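/* Pin the user memory with FOLL_LONGTERM so it stays resident for the
 * lifetime of the umem. A partial pin is treated as a failure: whatever
 * was pinned is released again and -ENOMEM is returned.
 */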
252 static int xdp_umem_pin_pages(struct xdp_umem *umem)
253 {
254 	unsigned int gup_flags = FOLL_WRITE;
255 	long npgs;
256 	int err;
257 
258 	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
259 			    GFP_KERNEL | __GFP_NOWARN);
260 	if (!umem->pgs)
261 		return -ENOMEM;
262 
263 	down_read(&current->mm->mmap_sem);
264 	npgs = get_user_pages(umem->address, umem->npgs,
265 			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
266 	up_read(&current->mm->mmap_sem);
267 
268 	if (npgs != umem->npgs) {
269 		if (npgs >= 0) {
270 			umem->npgs = npgs;
271 			err = -ENOMEM;
272 			goto out_pin;
273 		}
274 		err = npgs;
275 		goto out_pgs;
276 	}
277 	return 0;
278 
279 out_pin:
280 	xdp_umem_unpin_pages(umem);
281 out_pgs:
282 	kfree(umem->pgs);
283 	umem->pgs = NULL;
284 	return err;
285 }
286 
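/* Charge the pinned pages to the owning user's locked_vm, enforcing
 * RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK. The cmpxchg loop
 * makes the check-and-add safe against other tasks of the same user
 * registering umems concurrently.
 */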
287 static int xdp_umem_account_pages(struct xdp_umem *umem)
288 {
289 	unsigned long lock_limit, new_npgs, old_npgs;
290 
291 	if (capable(CAP_IPC_LOCK))
292 		return 0;
293 
294 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
295 	umem->user = get_uid(current_user());
296 
297 	do {
298 		old_npgs = atomic_long_read(&umem->user->locked_vm);
299 		new_npgs = old_npgs + umem->npgs;
300 		if (new_npgs > lock_limit) {
301 			free_uid(umem->user);
302 			umem->user = NULL;
303 			return -ENOBUFS;
304 		}
305 	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
306 				     new_npgs) != old_npgs);
307 	return 0;
308 }
309 
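/* Validate a registration request from userspace and initialize the
 * umem. The chunk size must be a power of two between
 * XDP_UMEM_MIN_CHUNK_SIZE and PAGE_SIZE, the memory area must be page
 * aligned, the chunks must fill whole pages, and the (64-byte aligned)
 * headroom plus XDP_PACKET_HEADROOM must still leave room for packet
 * data in each chunk. On success the pages are accounted, pinned and
 * kmap()ed so the kernel can address them directly.
 */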
310 static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
311 {
312 	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
313 	unsigned int chunks, chunks_per_page;
314 	u64 addr = mr->addr, size = mr->len;
315 	int size_chk, err, i;
316 
317 	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
318 		/* Strictly speaking we could support this, if:
319 		 * - huge pages, or
320 		 * - using an IOMMU, or
321 		 * - making sure the memory area is physically contiguous,
322 		 * but for now, we simply say "computer says no".
323 		 */
324 		return -EINVAL;
325 	}
326 
327 	if (!is_power_of_2(chunk_size))
328 		return -EINVAL;
329 
330 	if (!PAGE_ALIGNED(addr)) {
331 		/* For simplicity, the memory area has to be page size
332 		 * aligned. This restriction might be lifted later.
333 		 */
334 		return -EINVAL;
335 	}
336 
337 	if ((addr + size) < addr)
338 		return -EINVAL;
339 
340 	chunks = (unsigned int)div_u64(size, chunk_size);
341 	if (chunks == 0)
342 		return -EINVAL;
343 
344 	chunks_per_page = PAGE_SIZE / chunk_size;
345 	if (chunks < chunks_per_page || chunks % chunks_per_page)
346 		return -EINVAL;
347 
348 	headroom = ALIGN(headroom, 64);
349 
350 	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
351 	if (size_chk < 0)
352 		return -EINVAL;
353 
354 	umem->address = (unsigned long)addr;
355 	umem->chunk_mask = ~((u64)chunk_size - 1);
356 	umem->size = size;
357 	umem->headroom = headroom;
358 	umem->chunk_size_nohr = chunk_size - headroom;
359 	umem->npgs = size / PAGE_SIZE;
360 	umem->pgs = NULL;
361 	umem->user = NULL;
362 	INIT_LIST_HEAD(&umem->xsk_list);
363 	spin_lock_init(&umem->xsk_list_lock);
364 
365 	refcount_set(&umem->users, 1);
366 
367 	err = xdp_umem_account_pages(umem);
368 	if (err)
369 		return err;
370 
371 	err = xdp_umem_pin_pages(umem);
372 	if (err)
373 		goto out_account;
374 
375 	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
376 	if (!umem->pages) {
377 		err = -ENOMEM;
378 		goto out_account;
379 	}
380 
381 	for (i = 0; i < umem->npgs; i++)
382 		umem->pages[i].addr = kmap(umem->pgs[i]);
383 
384 	return 0;
385 
386 out_account:
387 	xdp_umem_unaccount_pages(umem);
388 	return err;
389 }
390 
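/* Allocate a new umem, give it an id and register the user memory. This
 * is reached from the XDP_UMEM_REG setsockopt on an AF_XDP socket, where
 * userspace passes something along these lines (a rough sketch; buffer,
 * NUM_FRAMES and FRAME_SIZE are placeholders, not part of the ABI):
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)buffer,
 *		.len = NUM_FRAMES * FRAME_SIZE,
 *		.chunk_size = FRAME_SIZE,
 *		.headroom = 0,
 *	};
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *
 * Returns the new umem with a single reference held, or an ERR_PTR()
 * on failure with everything cleaned up again.
 */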
391 struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
392 {
393 	struct xdp_umem *umem;
394 	int err;
395 
396 	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
397 	if (!umem)
398 		return ERR_PTR(-ENOMEM);
399 
400 	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
401 	if (err < 0) {
402 		kfree(umem);
403 		return ERR_PTR(err);
404 	}
405 	umem->id = err;
406 
407 	err = xdp_umem_reg(umem, mr);
408 	if (err) {
409 		ida_simple_remove(&umem_ida, umem->id);
410 		kfree(umem);
411 		return ERR_PTR(err);
412 	}
413 
414 	return umem;
415 }
416 
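/* Both the fill ring and the completion ring must have been created
 * before the umem can be bound to a device.
 */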
417 bool xdp_umem_validate_queues(struct xdp_umem *umem)
418 {
419 	return umem->fq && umem->cq;
420 }
421