/*-
 * Copyright (c) 2007 Seccuris Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract to
 * Seccuris Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into the kernel address space using sf_bufs and used directly
 * by BPF.  Memory is wired since page faults cannot be tolerated in the
 * contexts where the buffers are copied to (locks held, interrupt context,
 * etc).  Access to shared memory buffers is synchronized using a header on
 * each buffer, allowing the number of system calls to go to zero as BPF
 * reaches saturation (buffers filled as fast as they can be drained by the
 * user process).  Full details of the protocol for communicating between the
 * user process and BPF may be found in bpf(4).
 */

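/*
 * Illustrative sketch only; the authoritative protocol is specified in
 * bpf(4).  A user process consuming a buffer typically compares the two
 * generation numbers in the shared header and acknowledges the buffer by
 * catching its own generation number up to the kernel's, along these
 * lines:
 *
 *	struct bpf_zbuf_header *bzh = (struct bpf_zbuf_header *)buf;
 *
 *	if (bzh->bzh_kernel_gen != bzh->bzh_user_gen) {
 *		... consume bzh->bzh_kernel_len bytes after the header ...
 *		atomic_store_rel_int(&bzh->bzh_user_gen,
 *		    bzh->bzh_kernel_gen);
 *	}
 */
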
/*
 * Maximum number of pages per buffer.  Since all BPF devices use two, the
 * maximum per device is 2*BPF_MAX_PAGES.  Resource limits on the number of
 * sf_bufs may be an issue, so do not set this too high.  On older systems,
 * kernel address space limits may also be an issue.
 */
#define	BPF_MAX_PAGES	512

/*
 * struct zbuf describes a memory buffer loaned by a user process to the
 * kernel.  We represent this as a series of pages managed using an array of
 * sf_bufs.  Even though the memory is contiguous in user space, it may not
 * be mapped contiguously in the kernel (i.e., a set of physically
 * non-contiguous pages in the direct map region) so we must implement
 * scatter-gather copying.  One significant mitigating factor is that on
 * systems with a direct memory map, we can avoid TLB misses.
 *
 * At the front of the shared memory region is a bpf_zbuf_header, which
 * contains shared control data to allow user space and the kernel to
 * synchronize; this is included in zb_size, but not bd_bufsize, so that BPF
 * knows that the space is not available.
 */
struct zbuf {
	vm_offset_t	 zb_uaddr;	/* User address at time of setup. */
	size_t		 zb_size;	/* Size of buffer, incl. header. */
	u_int		 zb_numpages;	/* Number of pages. */
	int		 zb_flags;	/* Flags on zbuf. */
	struct sf_buf	**zb_pages;	/* Pages themselves. */
	struct bpf_zbuf_header	*zb_header;	/* Shared header. */
};

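/*
 * Layout of a single zbuf as seen by both sides (a sketch; proportions
 * are illustrative).  zb_size counts the whole region, while the buffer
 * size BPF advertises (bd_bufsize) excludes the shared header:
 *
 *	zb_uaddr (page-aligned)
 *	|
 *	v
 *	+-----------------+--------------------------------------+
 *	| bpf_zbuf_header |       packet data (bd_bufsize)       |
 *	+-----------------+--------------------------------------+
 *	|<--------------------- zb_size ------------------------>|
 */
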
/*
 * When a buffer has been assigned to userspace, flag it as such, as the
 * buffer may remain in the store position as a result of the user process
 * not yet having acknowledged the buffer in the hold position.
 */
#define	ZBUF_FLAG_ASSIGNED	0x00000001	/* Set when owned by user. */

/*
 * Release a page we've previously wired.
 */
static void
zbuf_page_free(vm_page_t pp)
{

	vm_page_lock(pp);
	vm_page_unwire(pp, PQ_INACTIVE);
	if (pp->wire_count == 0 && pp->object == NULL)
		vm_page_free(pp);
	vm_page_unlock(pp);
}

/*
 * Free an sf_buf with attached page.
 */
static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
	vm_page_t pp;

	pp = sf_buf_page(sf);
	sf_buf_free(sf);
	zbuf_page_free(pp);
}

/*
 * Free a zbuf, including its page array, sf_bufs, and pages.  Allow
 * partially allocated zbufs to be freed so that this function may be used
 * even during zbuf setup.
 */
static void
zbuf_free(struct zbuf *zb)
{
	int i;

	for (i = 0; i < zb->zb_numpages; i++) {
		if (zb->zb_pages[i] != NULL)
			zbuf_sfbuf_free(zb->zb_pages[i]);
	}
	free(zb->zb_pages, M_BPF);
	free(zb, M_BPF);
}

/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
	struct sf_buf *sf;
	vm_page_t pp;

	if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE, VM_PROT_READ |
	    VM_PROT_WRITE, &pp, 1) < 0)
		return (NULL);
	vm_page_lock(pp);
	vm_page_wire(pp);
	vm_page_unhold(pp);
	vm_page_unlock(pp);
	sf = sf_buf_alloc(pp, SFB_NOWAIT);
	if (sf == NULL) {
		zbuf_page_free(pp);
		return (NULL);
	}
	return (sf);
}

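/*
 * Note: the wire-then-unhold sequence above converts the transient hold
 * taken by vm_fault_quick_hold_pages() into a wiring that lasts for the
 * lifetime of the zbuf, so the page cannot be paged out while BPF may
 * write to it from contexts that cannot tolerate a fault.
 */
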
/*
 * Create a zbuf describing a range of user address space memory.  Validate
 * page alignment, size requirements, etc.
 */
static int
zbuf_setup(struct thread *td, vm_offset_t uaddr, size_t len,
    struct zbuf **zbp)
{
	struct zbuf *zb;
	struct vm_map *map;
	int error, i;

	*zbp = NULL;

	/*
	 * User address must be page-aligned.
	 */
	if (uaddr & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must be an integer number of full pages.
	 */
	if (len & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must not exceed per-buffer resource limit.
	 */
	if ((len / PAGE_SIZE) > BPF_MAX_PAGES)
		return (EINVAL);

	/*
	 * Allocate the buffer and set up each page with its own sf_buf.
	 */
	error = 0;
	zb = malloc(sizeof(*zb), M_BPF, M_ZERO | M_WAITOK);
	zb->zb_uaddr = uaddr;
	zb->zb_size = len;
	zb->zb_numpages = len / PAGE_SIZE;
	zb->zb_pages = malloc(sizeof(struct sf_buf *) *
	    zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
	map = &td->td_proc->p_vmspace->vm_map;
	for (i = 0; i < zb->zb_numpages; i++) {
		zb->zb_pages[i] = zbuf_sfbuf_get(map,
		    uaddr + (i * PAGE_SIZE));
		if (zb->zb_pages[i] == NULL) {
			error = EFAULT;
			goto error;
		}
	}
	zb->zb_header =
	    (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
	bzero(zb->zb_header, sizeof(*zb->zb_header));
	*zbp = zb;
	return (0);

error:
	zbuf_free(zb);
	return (error);
}

/*
 * Copy bytes from a source into the specified zbuf.  The caller is
 * responsible for performing bounds checking, etc.
 */
void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, page, poffset;
	u_char *src_bytes;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_bytes: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_bytes: NULL buf"));

	src_bytes = (u_char *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_bytes: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather copy to user pages mapped into kernel address space
	 * using sf_bufs: copy up to a page at a time.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
		   " page overflow (%d p %d np)\n", page, zb->zb_numpages));

		count = min(len, PAGE_SIZE - poffset);
		bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
		    poffset, count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
		    poffset));
		len -= count;
		src_bytes += count;
	}
}

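/*
 * Worked example of the arithmetic above, with illustrative numbers
 * (PAGE_SIZE of 4096 and a 32-byte bpf_zbuf_header assumed): a copy of
 * len = 100 at caller offset 4080 starts at absolute offset
 * 4080 + 32 = 4112, so page = 1 and poffset = 16, and the copy
 * completes in a single bcopy() because it does not cross a page
 * boundary.
 */
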
/*
 * Copy bytes from an mbuf chain to the specified zbuf: copying will be
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * to pages, which may not be contiguously mapped in kernel address space.
 * As with bpf_zerocopy_append_bytes(), the caller is responsible for
 * checking that this will not exceed the buffer limit.
 */
void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, moffset, page, poffset;
	const struct mbuf *m;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_mbuf: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_mbuf: NULL buf"));

	m = (struct mbuf *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather both from an mbuf chain and to a user page set
	 * mapped into kernel address space using sf_bufs.  If we're lucky,
	 * each mbuf requires one copy operation, but if page alignment and
	 * mbuf alignment work out less well, we'll be doing two copies per
	 * mbuf.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	moffset = 0;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages,
		    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
		    "np)\n", page, zb->zb_numpages));
		KASSERT(m != NULL,
		    ("bpf_zerocopy_append_mbuf: end of mbuf chain"));

		count = min(m->m_len - moffset, len);
		count = min(count, PAGE_SIZE - poffset);
		bcopy(mtod(m, u_char *) + moffset,
		    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
		    count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
		    poffset));
		moffset += count;
		if (moffset == m->m_len) {
			m = m->m_next;
			moffset = 0;
		}
		len -= count;
	}
}

355 
356 /*
357  * Notification from the BPF framework that a buffer in the store position is
358  * rejecting packets and may be considered full.  We mark the buffer as
359  * immutable and assign to userspace so that it is immediately available for
360  * the user process to access.
361  */
362 void
363 bpf_zerocopy_buffull(struct bpf_d *d)
364 {
365 	struct zbuf *zb;
366 
367 	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
368 	    ("bpf_zerocopy_buffull: not in zbuf mode"));
369 
370 	zb = (struct zbuf *)d->bd_sbuf;
371 	KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));
372 
373 	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
374 		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
375 		zb->zb_header->bzh_kernel_len = d->bd_slen;
376 		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
377 	}
378 }
379 
380 /*
381  * Notification from the BPF framework that a buffer has moved into the held
382  * slot on a descriptor.  Zero-copy BPF will update the shared page to let
383  * the user process know and flag the buffer as assigned if it hasn't already
384  * been marked assigned due to filling while it was in the store position.
385  *
386  * Note: identical logic as in bpf_zerocopy_buffull(), except that we operate
387  * on bd_hbuf and bd_hlen.
388  */
389 void
390 bpf_zerocopy_bufheld(struct bpf_d *d)
391 {
392 	struct zbuf *zb;
393 
394 	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
395 	    ("bpf_zerocopy_bufheld: not in zbuf mode"));
396 
397 	zb = (struct zbuf *)d->bd_hbuf;
398 	KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));
399 
400 	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
401 		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
402 		zb->zb_header->bzh_kernel_len = d->bd_hlen;
403 		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
404 	}
405 }
406 
407 /*
408  * Notification from the BPF framework that the free buffer has been been
409  * rotated out of the held position to the free position.  This happens when
410  * the user acknowledges the held buffer.
411  */
412 void
413 bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
414 {
415 	struct zbuf *zb;
416 
417 	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
418 	    ("bpf_zerocopy_reclaim_buf: not in zbuf mode"));
419 
420 	KASSERT(d->bd_fbuf != NULL,
421 	    ("bpf_zerocopy_buf_reclaimed: NULL free buf"));
422 	zb = (struct zbuf *)d->bd_fbuf;
423 	zb->zb_flags &= ~ZBUF_FLAG_ASSIGNED;
424 }
425 
426 /*
427  * Query from the BPF framework regarding whether the buffer currently in the
428  * held position can be moved to the free position, which can be indicated by
429  * the user process making their generation number equal to the kernel
430  * generation number.
431  */
432 int
433 bpf_zerocopy_canfreebuf(struct bpf_d *d)
434 {
435 	struct zbuf *zb;
436 
437 	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
438 	    ("bpf_zerocopy_canfreebuf: not in zbuf mode"));
439 
440 	zb = (struct zbuf *)d->bd_hbuf;
441 	if (zb == NULL)
442 		return (0);
443 	if (zb->zb_header->bzh_kernel_gen ==
444 	    atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
445 		return (1);
446 	return (0);
447 }
448 
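/*
 * Note: the acquire load of bzh_user_gen above pairs with a release
 * store performed by the user process when it acknowledges the buffer,
 * while the release increment of bzh_kernel_gen in
 * bpf_zerocopy_buffull() and bpf_zerocopy_bufheld() orders the writes
 * of bzh_kernel_len and the packet data before the new generation
 * number becomes visible to user space.
 */
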
/*
 * Query from the BPF framework as to whether or not the buffer currently in
 * the store position can actually be written to.  This may return false if
 * the store buffer is assigned to userspace before the hold buffer is
 * acknowledged.
 */
int
bpf_zerocopy_canwritebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canwritebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));

	if (zb->zb_flags & ZBUF_FLAG_ASSIGNED)
		return (0);
	return (1);
}

470 
471 /*
472  * Free zero copy buffers at request of descriptor.
473  */
474 void
475 bpf_zerocopy_free(struct bpf_d *d)
476 {
477 	struct zbuf *zb;
478 
479 	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
480 	    ("bpf_zerocopy_free: not in zbuf mode"));
481 
482 	zb = (struct zbuf *)d->bd_sbuf;
483 	if (zb != NULL)
484 		zbuf_free(zb);
485 	zb = (struct zbuf *)d->bd_hbuf;
486 	if (zb != NULL)
487 		zbuf_free(zb);
488 	zb = (struct zbuf *)d->bd_fbuf;
489 	if (zb != NULL)
490 		zbuf_free(zb);
491 }
492 
493 /*
494  * Ioctl to return the maximum buffer size.
495  */
496 int
497 bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
498 {
499 
500 	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
501 	    ("bpf_zerocopy_ioctl_getzmax: not in zbuf mode"));
502 
503 	*i = BPF_MAX_PAGES * PAGE_SIZE;
504 	return (0);
505 }
506 
507 /*
508  * Ioctl to force rotation of the two buffers, if there's any data available.
509  * This can be used by user space to implement timeouts when waiting for a
510  * buffer to fill.
511  */
512 int
513 bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
514     struct bpf_zbuf *bz)
515 {
516 	struct zbuf *bzh;
517 
518 	bzero(bz, sizeof(*bz));
519 	BPFD_LOCK(d);
520 	if (d->bd_hbuf == NULL && d->bd_slen != 0) {
521 		ROTATE_BUFFERS(d);
522 		bzh = (struct zbuf *)d->bd_hbuf;
523 		bz->bz_bufa = (void *)bzh->zb_uaddr;
524 		bz->bz_buflen = d->bd_hlen;
525 	}
526 	BPFD_UNLOCK(d);
527 	return (0);
528 }
529 
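/*
 * Illustrative user space counterpart (a sketch, not normative): a
 * process that has waited too long for a buffer to fill can force a
 * rotation and then check whether any data became available:
 *
 *	struct bpf_zbuf bz;
 *
 *	if (ioctl(fd, BIOCROTZBUF, &bz) == 0 && bz.bz_bufa != NULL) {
 *		... bz.bz_buflen bytes are now held in the buffer
 *		    whose user address is bz.bz_bufa ...
 *	}
 */
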
/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */
int
bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *zba, *zbb;
	int error;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_setzbuf: not in zbuf mode"));

	/*
	 * Must set both buffers.  Cannot clear them.
	 */
	if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
		return (EINVAL);

	/*
	 * Buffers must have a size greater than 0.  Alignment and other size
	 * validity checking is done in zbuf_setup().
	 */
	if (bz->bz_buflen == 0)
		return (EINVAL);

	/*
	 * Allocate new buffers.
	 */
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
	    &zba);
	if (error)
		return (error);
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
	    &zbb);
	if (error) {
		zbuf_free(zba);
		return (error);
	}

	/*
	 * We only allow buffers to be installed once, so atomically check
	 * that no buffers are currently installed and install new buffers.
	 */
	BPFD_LOCK(d);
	if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
	    d->bd_bif != NULL) {
		BPFD_UNLOCK(d);
		zbuf_free(zba);
		zbuf_free(zbb);
		return (EINVAL);
	}

	/*
	 * Point BPF descriptor at buffers; initialize sbuf as zba so that
	 * it is always filled first in the sequence, per bpf(4).
	 */
	d->bd_fbuf = (caddr_t)zbb;
	d->bd_sbuf = (caddr_t)zba;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	/*
	 * We expose only the space left in the buffer after the size of the
	 * shared management region.
	 */
	d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
	BPFD_UNLOCK(d);
	return (0);
}
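
/*
 * Illustrative user space setup against the ioctl above (a sketch;
 * error handling omitted, and fd is assumed to be an open bpf(4)
 * descriptor already switched to BPF_BUFMODE_ZBUF): both buffers must
 * be page-aligned and of equal size, per the checks above and in
 * zbuf_setup(), and BIOCGETZMAX bounds the permitted size:
 *
 *	struct bpf_zbuf bz;
 *	size_t len = 4 * getpagesize();
 *
 *	bz.bz_bufa = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	bz.bz_bufb = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	bz.bz_buflen = len;
 *	(void)ioctl(fd, BIOCSETZBUF, &bz);
 */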