xref: /freebsd/sys/net/bpf_zerocopy.c (revision b3aaa0cc21c63d388230c7ef2a80abd631ff20d5)
/*-
 * Copyright (c) 2007 Seccuris Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract to
 * Seccuris Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_jitter.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into the kernel address space using sf_bufs and used directly
 * by BPF.  Memory is wired since page faults cannot be tolerated in the
 * contexts in which the buffers are filled (locks held, interrupt context,
 * etc.).  Access to shared memory buffers is synchronized using a header on
 * each buffer, allowing the number of system calls to go to zero as BPF
 * reaches saturation (buffers filled as fast as they can be drained by the
 * user process).  Full details of the protocol for communicating between the
 * user process and BPF may be found in bpf(4).
 */
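
/*
 * Illustrative sketch (not part of the original implementation): per the
 * bpf(4) protocol, a user process consuming a zero-copy buffer compares the
 * generation numbers in the shared header, processes any new data, and then
 * acknowledges the buffer by matching the kernel's generation number.  Here
 * "buf" stands for the process's pointer to one of its donated buffers:
 *
 *	struct bpf_zbuf_header *bzh = (struct bpf_zbuf_header *)buf;
 *
 *	if (bzh->bzh_kernel_gen != bzh->bzh_user_gen) {
 *		// Consume bzh->bzh_kernel_len bytes following the header.
 *		bzh->bzh_user_gen = bzh->bzh_kernel_gen;
 *	}
 *
 * A real consumer must pair these accesses with memory barriers (an acquire
 * load of bzh_kernel_gen, a release store of bzh_user_gen), mirroring the
 * atomic_load_acq_int() and atomic_add_rel_int() calls below.
 */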

/*
 * Maximum number of pages per buffer.  Since all BPF devices use two, the
 * maximum per device is 2*BPF_MAX_PAGES.  Resource limits on the number of
 * sf_bufs may be an issue, so do not set this too high.  On older systems,
 * kernel address space limits may also be an issue.
 */
#define	BPF_MAX_PAGES	512

/*
 * struct zbuf describes a memory buffer loaned by a user process to the
 * kernel.  We represent this as a series of pages managed using an array of
 * sf_bufs.  Even though the memory is contiguous in user space, it may not
 * be mapped contiguously in the kernel (i.e., a set of physically
 * non-contiguous pages in the direct map region) so we must implement
 * scatter-gather copying.  One significant mitigating factor is that on
 * systems with a direct memory map, we can avoid TLB misses.
 *
 * At the front of the shared memory region is a bpf_zbuf_header, which
 * contains shared control data to allow user space and the kernel to
 * synchronize; this is included in zb_size, but not bd_bufsize, so that BPF
 * knows that the space is not available.
 */
struct zbuf {
	vm_offset_t	 zb_uaddr;	/* User address, may be stale. */
	size_t		 zb_size;	/* Size of buffer, incl. header. */
	u_int		 zb_numpages;	/* Number of pages. */
	int		 zb_flags;	/* Flags on zbuf. */
	struct sf_buf	**zb_pages;	/* Pages themselves. */
	struct bpf_zbuf_header	*zb_header;	/* Shared header. */
};

/*
 * When a buffer has been assigned to userspace, flag it as such, as the
 * buffer may remain in the store position as a result of the user process
 * not yet having acknowledged the buffer in the hold position.
 */
#define	ZBUF_FLAG_IMMUTABLE	0x00000001	/* Set when owned by user. */

/*
 * Release a page we've previously wired.
 */
static void
zbuf_page_free(vm_page_t pp)
{

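	/*
	 * Drop our wiring; if this was the last wiring and the page does
	 * not belong to a VM object, nothing else references it, so free
	 * it back to the VM system.
	 */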
	vm_page_lock_queues();
	vm_page_unwire(pp, 0);
	if (pp->wire_count == 0 && pp->object == NULL)
		vm_page_free(pp);
	vm_page_unlock_queues();
}

/*
 * Free an sf_buf with attached page.
 */
static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
	vm_page_t pp;

	pp = sf_buf_page(sf);
	sf_buf_free(sf);
	zbuf_page_free(pp);
}

/*
 * Free a zbuf, including its page array, sf_bufs, and pages.  Allow
 * partially allocated zbufs to be freed so that this routine may be used
 * to clean up if zbuf setup fails partway through.
 */
static void
zbuf_free(struct zbuf *zb)
{
	int i;

	for (i = 0; i < zb->zb_numpages; i++) {
		if (zb->zb_pages[i] != NULL)
			zbuf_sfbuf_free(zb->zb_pages[i]);
	}
	free(zb->zb_pages, M_BPF);
	free(zb, M_BPF);
}

/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
	struct sf_buf *sf;
	vm_page_t pp;

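	/*
	 * Fault the page in with read/write access so that a valid mapping
	 * exists for pmap_extract_and_hold() to look up below.
	 */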
	if (vm_fault_quick((caddr_t) uaddr, VM_PROT_READ | VM_PROT_WRITE) <
	    0)
		return (NULL);
	pp = pmap_extract_and_hold(map->pmap, uaddr, VM_PROT_READ |
	    VM_PROT_WRITE);
	if (pp == NULL)
		return (NULL);
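	/*
	 * Convert the transient hold acquired by pmap_extract_and_hold()
	 * into a wiring that keeps the page resident for the lifetime of
	 * the buffer.
	 */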
	vm_page_lock_queues();
	vm_page_wire(pp);
	vm_page_unhold(pp);
	vm_page_unlock_queues();
	sf = sf_buf_alloc(pp, SFB_NOWAIT);
	if (sf == NULL) {
		zbuf_page_free(pp);
		return (NULL);
	}
	return (sf);
}

/*
 * Create a zbuf describing a range of user address space memory.  Validate
 * page alignment, size requirements, etc.
 */
static int
zbuf_setup(struct thread *td, vm_offset_t uaddr, size_t len,
    struct zbuf **zbp)
{
	struct zbuf *zb;
	struct vm_map *map;
	int error, i;

	*zbp = NULL;

	/*
	 * User address must be page-aligned.
	 */
	if (uaddr & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must be an integer number of full pages.
	 */
	if (len & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must not exceed per-buffer resource limit.
	 */
	if ((len / PAGE_SIZE) > BPF_MAX_PAGES)
		return (EINVAL);

	/*
	 * Allocate the buffer and set up each page with its own sf_buf.
	 */
	error = 0;
	zb = malloc(sizeof(*zb), M_BPF, M_ZERO | M_WAITOK);
	zb->zb_uaddr = uaddr;
	zb->zb_size = len;
	zb->zb_numpages = len / PAGE_SIZE;
	zb->zb_pages = malloc(sizeof(struct sf_buf *) *
	    zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
	map = &td->td_proc->p_vmspace->vm_map;
	for (i = 0; i < zb->zb_numpages; i++) {
		zb->zb_pages[i] = zbuf_sfbuf_get(map,
		    uaddr + (i * PAGE_SIZE));
		if (zb->zb_pages[i] == NULL) {
			error = EFAULT;
			goto error;
		}
	}
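
	/*
	 * The shared header lives at the start of the first page; zero it
	 * so that both generation numbers begin in a known state.
	 */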
	zb->zb_header =
	    (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
	bzero(zb->zb_header, sizeof(*zb->zb_header));
	*zbp = zb;
	return (0);

error:
	zbuf_free(zb);
	return (error);
}

/*
 * Copy bytes from a source into the specified zbuf.  The caller is
 * responsible for performing bounds checking, etc.
 */
void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, page, poffset;
	u_char *src_bytes;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_bytes: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_bytes: NULL buf"));

	src_bytes = (u_char *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0,
	    ("bpf_zerocopy_append_bytes: ZBUF_FLAG_IMMUTABLE"));

	/*
	 * Scatter-gather copy to user pages mapped into kernel address space
	 * using sf_bufs: copy up to a page at a time.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
		   " page overflow (%d p %d np)\n", page, zb->zb_numpages));

		count = min(len, PAGE_SIZE - poffset);
		bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
		    poffset, count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
		    poffset));
		len -= count;
		src_bytes += count;
	}
}

/*
 * Copy bytes from an mbuf chain to the specified zbuf: copying will be
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * to pages, which may not be contiguously mapped in kernel address space.
 * As with bpf_zerocopy_append_bytes(), the caller is responsible for
 * checking that this will not exceed the buffer limit.
 */
void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, moffset, page, poffset;
	const struct mbuf *m;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_mbuf: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_mbuf: NULL buf"));

	m = (struct mbuf *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0,
	    ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_IMMUTABLE"));

	/*
	 * Scatter-gather both from an mbuf chain and to a user page set
	 * mapped into kernel address space using sf_bufs.  If we're lucky,
	 * each mbuf requires one copy operation, but if page alignment and
	 * mbuf alignment work out less well, we'll be doing two copies per
	 * mbuf.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	moffset = 0;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages,
		    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
		    "np)\n", page, zb->zb_numpages));
		KASSERT(m != NULL,
		    ("bpf_zerocopy_append_mbuf: end of mbuf chain"));

		count = min(m->m_len - moffset, len);
		count = min(count, PAGE_SIZE - poffset);
		bcopy(mtod(m, u_char *) + moffset,
		    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
		    count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
		    poffset));
		moffset += count;
		if (moffset == m->m_len) {
			m = m->m_next;
			moffset = 0;
		}
		len -= count;
	}
}

/*
 * Notification from the BPF framework that a buffer in the store position is
 * rejecting packets and may be considered full.  We mark the buffer as
 * immutable and assign it to userspace so that it is immediately available
 * for the user process to access.
 */
void
bpf_zerocopy_buffull(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buffull: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0) {
		zb->zb_flags |= ZBUF_FLAG_IMMUTABLE;
		zb->zb_header->bzh_kernel_len = d->bd_slen;
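		/*
		 * Advance the kernel generation number with release
		 * semantics so that the buffer length store above is
		 * visible to the user process no later than the new
		 * generation value.
		 */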
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}

/*
 * Notification from the BPF framework that a buffer has moved into the held
 * slot on a descriptor.  Zero-copy BPF will update the shared page to let
 * the user process know, and will flag the buffer as immutable if it hasn't
 * already been marked immutable due to filling while it was in the store
 * position.
 *
 * Note: the logic is identical to that in bpf_zerocopy_buffull(), except
 * that we operate on bd_hbuf and bd_hlen.
 */
void
bpf_zerocopy_bufheld(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_bufheld: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0) {
		zb->zb_flags |= ZBUF_FLAG_IMMUTABLE;
		zb->zb_header->bzh_kernel_len = d->bd_hlen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}

/*
 * Notification from the BPF framework that the free buffer has been
 * reassigned.  This happens when the user acknowledges the buffer.
 */
void
bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buf_reclaimed: not in zbuf mode"));

	KASSERT(d->bd_fbuf != NULL,
	    ("bpf_zerocopy_buf_reclaimed: NULL free buf"));
	zb = (struct zbuf *)d->bd_fbuf;
	zb->zb_flags &= ~ZBUF_FLAG_IMMUTABLE;
}

/*
 * Query from the BPF framework regarding whether the buffer currently in
 * the held position can be moved to the free position; the user process
 * indicates this by setting its generation number equal to the kernel
 * generation number.
 */
int
bpf_zerocopy_canfreebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canfreebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	if (zb == NULL)
		return (0);
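
	/*
	 * The user process acknowledges a buffer by setting bzh_user_gen to
	 * match bzh_kernel_gen; load it with acquire semantics to
	 * synchronize with that update.
	 */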
	if (zb->zb_header->bzh_kernel_gen ==
	    atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
		return (1);
	return (0);
}

/*
 * Query from the BPF framework as to whether or not the buffer currently
 * in the store position can actually be written to.  This may return false
 * if the store buffer is assigned to userspace before the hold buffer is
 * acknowledged.
 */
int
bpf_zerocopy_canwritebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canwritebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));

	if (zb->zb_flags & ZBUF_FLAG_IMMUTABLE)
		return (0);
	return (1);
}

/*
 * Free zero-copy buffers at the request of a descriptor.
 */
void
bpf_zerocopy_free(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_free: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_hbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_fbuf;
	if (zb != NULL)
		zbuf_free(zb);
}

/*
 * Ioctl to return the maximum buffer size.
 */
int
bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_getzmax: not in zbuf mode"));

	*i = BPF_MAX_PAGES * PAGE_SIZE;
	return (0);
}

/*
 * Ioctl to force rotation of the two buffers, if there's any data available.
 * This can be used by user space to implement timeouts when waiting for a
 * buffer to fill.
 */
int
bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *bzh;

	bzero(bz, sizeof(*bz));
	BPFD_LOCK(d);
	if (d->bd_hbuf == NULL && d->bd_slen != 0) {
		ROTATE_BUFFERS(d);
		bzh = (struct zbuf *)d->bd_hbuf;
		bz->bz_bufa = (void *)bzh->zb_uaddr;
		bz->bz_buflen = d->bd_hlen;
	}
	BPFD_UNLOCK(d);
	return (0);
}

/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */
int
bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *zba, *zbb;
	int error;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_setzbuf: not in zbuf mode"));

	/*
	 * Must set both buffers.  Cannot clear them.
	 */
	if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
		return (EINVAL);

	/*
	 * Buffers must have a size greater than 0.  Alignment and other size
	 * validity checking is done in zbuf_setup().
	 */
	if (bz->bz_buflen == 0)
		return (EINVAL);

	/*
	 * Allocate new buffers.
	 */
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
	    &zba);
	if (error)
		return (error);
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
	    &zbb);
	if (error) {
		zbuf_free(zba);
		return (error);
	}

	/*
	 * We only allow buffers to be installed once, so atomically check
	 * that no buffers are currently installed and install new buffers.
	 */
	BPFD_LOCK(d);
	if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
	    d->bd_bif != NULL) {
		BPFD_UNLOCK(d);
		zbuf_free(zba);
		zbuf_free(zbb);
		return (EINVAL);
	}

	/*
	 * Point BPF descriptor at buffers; initialize sbuf as zba so that
	 * it is always filled first in the sequence, per bpf(4).
	 */
	d->bd_fbuf = (caddr_t)zbb;
	d->bd_sbuf = (caddr_t)zba;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	/*
	 * We expose only the space left in the buffer after the size of the
	 * shared management region.
	 */
	d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
	BPFD_UNLOCK(d);
	return (0);
}
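
/*
 * Illustrative sketch (not part of the original file): a typical zero-copy
 * consumer configures a descriptor roughly as follows, using the ioctls
 * defined for bpf(4).  Error handling and the allocation of the two
 * page-aligned buffers (bufa, bufb, of buflen bytes each, all hypothetical
 * names) are omitted:
 *
 *	size_t zmax;
 *	struct bpf_zbuf bz;
 *	u_int mode = BPF_BUFMODE_ZBUF;
 *	int fd = open("/dev/bpf", O_RDWR);
 *
 *	ioctl(fd, BIOCSETBUFMODE, &mode);
 *	ioctl(fd, BIOCGETZMAX, &zmax);
 *	bz.bz_bufa = bufa;		// page-aligned, buflen bytes
 *	bz.bz_bufb = bufb;		// page-aligned, buflen bytes
 *	bz.bz_buflen = buflen;		// multiple of PAGE_SIZE, <= zmax
 *	ioctl(fd, BIOCSETZBUF, &bz);
 *
 * After attaching to an interface with BIOCSETIF, the process polls the
 * shared headers (see the sketch near the top of this file) and may use
 * BIOCROTZBUF to force buffer rotation when implementing timeouts.
 */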