xref: /linux/net/xdp/xdp_umem.c (revision 217188d9f985bd3ce8c9a0cd50ca35be62d0f8c6)
// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

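/* Attach an XDP socket to a umem's socket list. Readers walk the list
 * under RCU; writers are serialized by xsk_list_lock.
 */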
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

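/* Detach an XDP socket from its umem. Nothing to do if the socket was
 * never bound to a device. For zero-copy umems, wait for in-flight
 * RCU readers so the driver no longer sees the socket on return.
 */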
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (xs->dev) {
		spin_lock_irqsave(&umem->xsk_list_lock, flags);
		list_del_rcu(&xs->list);
		spin_unlock_irqrestore(&umem->xsk_list_lock, flags);

		if (umem->zc)
			synchronize_net();
	}
}

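/* Ask the driver whether a umem is already bound to @queue_id.
 * Returns a negative errno on driver failure, 1 if a umem is bound,
 * and 0 otherwise (including when the driver has no ndo_bpf).
 */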
int xdp_umem_query(struct net_device *dev, u16 queue_id)
{
	struct netdev_bpf bpf;

	ASSERT_RTNL();

	memset(&bpf, 0, sizeof(bpf));
	bpf.command = XDP_QUERY_XSK_UMEM;
	bpf.xsk.queue_id = queue_id;

	if (!dev->netdev_ops->ndo_bpf)
		return 0;
	return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
}

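/* Bind a umem to a device queue, preferring zero-copy. Copy mode needs
 * no driver-side setup, so XDP_COPY returns immediately. Zero-copy
 * requires ndo_bpf and ndo_xsk_async_xmit plus a free queue id; unless
 * XDP_ZEROCOPY was demanded, any zero-copy failure falls back to copy
 * mode.
 */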
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u32 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err;

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (force_copy)
		return 0;

	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
		return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */

	rtnl_lock();
	err = xdp_umem_query(dev, queue_id);
	if (err) {
		err = err < 0 ? -EOPNOTSUPP : -EBUSY;
		goto err_rtnl_unlock;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_rtnl_unlock;
	rtnl_unlock();

	dev_hold(dev);
	umem->dev = dev;
	umem->queue_id = queue_id;
	umem->zc = true;
	return 0;

err_rtnl_unlock:
	rtnl_unlock();
	return force_zc ? err : 0; /* fail or fallback */
}

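/* Undo xdp_umem_assign_dev(): ask the driver to drop the umem for this
 * queue and release the device reference.
 */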
static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	if (umem->dev) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		rtnl_lock();
		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
		rtnl_unlock();

		if (err)
			WARN(1, "failed to disable umem!\n");

		dev_put(umem->dev);
		umem->dev = NULL;
	}
}

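/* Release the pinned user pages: mark them dirty, since the kernel may
 * have written packet data into them, and drop the get_user_pages()
 * references.
 */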
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

		set_page_dirty_lock(page);
		put_page(page);
	}

	kfree(umem->pgs);
	umem->pgs = NULL;
}

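/* Return the locked-memory charge taken at registration. umem->user is
 * NULL if the owner had CAP_IPC_LOCK and was never charged.
 */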
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

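/* Final teardown once the last reference is gone: detach from the
 * device, destroy the fill and completion rings, then unpin, free and
 * unaccount the user memory.
 */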
static void xdp_umem_release(struct xdp_umem *umem)
{
	xdp_umem_clear_dev(umem);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xdp_umem_unpin_pages(umem);

	/* Locked-memory accounting is per-user (umem->user), not per-mm,
	 * so the pages must be freed and unaccounted even if the owning
	 * task has already exited; there is no need to look up its mm.
	 */
	put_pid(umem->pid);

	kfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

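/* Work item wrapper: releasing a umem sleeps (it takes rtnl_lock, for
 * instance), so the final reference drop defers it to a workqueue.
 */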
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

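/* Take an additional reference on the umem. */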
void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

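/* Drop a reference, scheduling the actual release once the last
 * reference is gone.
 */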
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

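/* Pin the umem's user pages in memory so the kernel can address them
 * for the lifetime of the umem.
 */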
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags, &umem->pgs[0], NULL);
	up_write(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			/* Only some of the pages could be pinned; treat a
			 * partial pin as failure and release what we got.
			 */
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	/* Unpinning also frees and clears umem->pgs, so the kfree()
	 * below is a no-op on this path.
	 */
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

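/* Charge the pinned pages against the owner's RLIMIT_MEMLOCK, unless
 * the caller has CAP_IPC_LOCK. The cmpxchg loop makes the limit check
 * and the update atomic against concurrent registrations by the same
 * user.
 */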
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

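/* Validate a registration request and set up the umem: sanity-check
 * chunk size, alignment and headroom, charge and pin the user pages,
 * and build the kernel-side page address table.
 */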
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err, i;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* The memory area has to be page size aligned for now;
		 * this restriction might be lifted in the future.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr) /* overflow */
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	/* The area must hold a whole number of pages' worth of chunks;
	 * since chunk_size is a power of two no larger than PAGE_SIZE,
	 * chunks never straddle a page boundary.
	 */
	chunks_per_page = PAGE_SIZE / chunk_size;
	if (chunks < chunks_per_page || chunks % chunks_per_page)
		return -EINVAL;

	headroom = ALIGN(headroom, 64);

	/* The headroom (user-requested plus XDP_PACKET_HEADROOM) must
	 * still leave room for packet data in each chunk.
	 */
	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->pid = get_task_pid(current, PIDTYPE_PID);
	umem->address = (unsigned long)addr;
	umem->props.chunk_mask = ~((u64)chunk_size - 1);
	umem->props.size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		goto out;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_pin;
	}

	for (i = 0; i < umem->npgs; i++)
		umem->pages[i].addr = page_address(umem->pgs[i]);

	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
out:
	put_pid(umem->pid);
	return err;
}

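/* Allocate and register a new umem from a user's registration request.
 * Returns the umem, or an ERR_PTR on failure.
 */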
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = xdp_umem_reg(umem, mr);
	if (err) {
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

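/* A umem is only usable once both its fill and completion rings have
 * been created.
 */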
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}