/*-
 * Copyright (c) 2007 Seccuris Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract to
 * Seccuris Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_jitter.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into the kernel address space using sf_bufs and used directly
 * by BPF.  Memory is wired since page faults cannot be tolerated in the
 * contexts where the buffers are copied to (locks held, interrupt context,
 * etc.).  Access to shared memory buffers is synchronized using a header on
 * each buffer, allowing the number of system calls to go to zero as BPF
 * reaches saturation (buffers filled as fast as they can be drained by the
 * user process).  Full details of the protocol for communicating between the
 * user process and BPF may be found in bpf(4).
 */
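/*
 * As an illustrative sketch only (the authoritative description of the
 * protocol is in bpf(4)), a user process consuming a buffer polls the
 * shared header and acknowledges data by advancing its own generation
 * number to match the kernel's; the field names below are those of
 * struct bpf_zbuf_header:
 *
 *	struct bpf_zbuf_header *bzh = (struct bpf_zbuf_header *)buf;
 *	unsigned int gen = bzh->bzh_kernel_gen;	(read with acquire semantics)
 *	if (gen != bzh->bzh_user_gen) {
 *		(process bzh->bzh_kernel_len bytes following the header)
 *		bzh->bzh_user_gen = gen;	(store with release semantics)
 *	}
 */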
/*
 * Maximum number of pages per buffer.  Since all BPF devices use two, the
 * maximum per device is 2*BPF_MAX_PAGES.  Resource limits on the number of
 * sf_bufs may be an issue, so do not set this too high.  On older systems,
 * kernel address space limits may also be an issue.
 */
#define	BPF_MAX_PAGES	512

/*
 * struct zbuf describes a memory buffer loaned by a user process to the
 * kernel.  We represent this as a series of pages managed using an array of
 * sf_bufs.  Even though the memory is contiguous in user space, it may not
 * be mapped contiguously in the kernel (i.e., a set of physically
 * non-contiguous pages in the direct map region), so we must implement
 * scatter-gather copying.  One significant mitigating factor is that on
 * systems with a direct memory map, we can avoid TLB misses.
 *
 * At the front of the shared memory region is a bpf_zbuf_header, which
 * contains shared control data to allow user space and the kernel to
 * synchronize; this is included in zb_size, but not bd_bufsize, so that BPF
 * knows that the header space is not available for packet data.
 */
struct zbuf {
	vm_offset_t	 zb_uaddr;	/* User address, may be stale. */
	size_t		 zb_size;	/* Size of buffer, incl. header. */
	u_int		 zb_numpages;	/* Number of pages. */
	int		 zb_flags;	/* Flags on zbuf. */
	struct sf_buf	**zb_pages;	/* Pages themselves. */
	struct bpf_zbuf_header	*zb_header;	/* Shared header. */
};
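/*
 * A worked example of the layout (assuming 4KB pages): an 8KB zbuf has
 * zb_size = 8192 and zb_numpages = 2, with the bpf_zbuf_header at the
 * start of the first page, so BPF is given only
 * 8192 - sizeof(struct bpf_zbuf_header) bytes of packet space via
 * bd_bufsize.
 */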
/*
 * When a buffer has been assigned to userspace, flag it as such, as the
 * buffer may remain in the store position as a result of the user process
 * not yet having acknowledged the buffer in the hold position.
 */
#define	ZBUF_FLAG_IMMUTABLE	0x00000001	/* Set when owned by user. */

/*
 * Release a page we've previously wired.
 */
static void
zbuf_page_free(vm_page_t pp)
{

	vm_page_lock_queues();
	vm_page_unwire(pp, 0);
	if (pp->wire_count == 0 && pp->object == NULL)
		vm_page_free(pp);
	vm_page_unlock_queues();
}

/*
 * Free an sf_buf with attached page.
 */
static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
	vm_page_t pp;

	pp = sf_buf_page(sf);
	sf_buf_free(sf);
	zbuf_page_free(pp);
}

/*
 * Free a zbuf, including its page array, sf_bufs, and pages.  Allow
 * partially allocated zbufs to be freed so that this may be used for
 * cleanup even during zbuf setup.
 */
static void
zbuf_free(struct zbuf *zb)
{
	int i;

	for (i = 0; i < zb->zb_numpages; i++) {
		if (zb->zb_pages[i] != NULL)
			zbuf_sfbuf_free(zb->zb_pages[i]);
	}
	free(zb->zb_pages, M_BPF);
	free(zb, M_BPF);
}

/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
	struct sf_buf *sf;
	vm_page_t pp;

	if (vm_fault_quick((caddr_t)uaddr, VM_PROT_READ | VM_PROT_WRITE) <
	    0)
		return (NULL);
	pp = pmap_extract_and_hold(map->pmap, uaddr, VM_PROT_READ |
	    VM_PROT_WRITE);
	if (pp == NULL)
		return (NULL);
	vm_page_lock_queues();
	vm_page_wire(pp);
	vm_page_unhold(pp);
	vm_page_unlock_queues();
	sf = sf_buf_alloc(pp, SFB_NOWAIT);
	if (sf == NULL) {
		zbuf_page_free(pp);
		return (NULL);
	}
	return (sf);
}

/*
 * Create a zbuf describing a range of user address space memory.  Validate
 * page alignment, size requirements, etc.
 */
static int
zbuf_setup(struct thread *td, vm_offset_t uaddr, size_t len,
    struct zbuf **zbp)
{
	struct zbuf *zb;
	struct vm_map *map;
	int error, i;

	*zbp = NULL;

	/*
	 * User address must be page-aligned.
	 */
	if (uaddr & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must be an integer number of full pages.
	 */
	if (len & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must not exceed per-buffer resource limit.
	 */
	if ((len / PAGE_SIZE) > BPF_MAX_PAGES)
		return (EINVAL);

	/*
	 * Allocate the buffer and set up each page with its own sf_buf.
	 */
	error = 0;
	zb = malloc(sizeof(*zb), M_BPF, M_ZERO | M_WAITOK);
	zb->zb_uaddr = uaddr;
	zb->zb_size = len;
	zb->zb_numpages = len / PAGE_SIZE;
	zb->zb_pages = malloc(sizeof(struct sf_buf *) *
	    zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
	map = &td->td_proc->p_vmspace->vm_map;
	for (i = 0; i < zb->zb_numpages; i++) {
		zb->zb_pages[i] = zbuf_sfbuf_get(map,
		    uaddr + (i * PAGE_SIZE));
		if (zb->zb_pages[i] == NULL) {
			error = EFAULT;
			goto error;
		}
	}
	zb->zb_header =
	    (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
	bzero(zb->zb_header, sizeof(*zb->zb_header));
	*zbp = zb;
	return (0);

error:
	zbuf_free(zb);
	return (error);
}

/*
 * Copy bytes from a source into the specified zbuf.  The caller is
 * responsible for performing bounds checking, etc.
 */
void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, page, poffset;
	u_char *src_bytes;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_bytes: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_bytes: NULL buf"));

	src_bytes = (u_char *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0,
	    ("bpf_zerocopy_append_bytes: ZBUF_FLAG_IMMUTABLE"));

	/*
	 * Scatter-gather copy to user pages mapped into kernel address space
	 * using sf_bufs: copy up to a page at a time.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
		    " page overflow (%d p %d np)\n", page, zb->zb_numpages));

		count = min(len, PAGE_SIZE - poffset);
		bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
		    poffset, count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
		    poffset));
		len -= count;
		src_bytes += count;
	}
}
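/*
 * To make the arithmetic above concrete: because the shared header occupies
 * the front of page 0, an append at packet offset 0 begins at poffset =
 * sizeof(struct bpf_zbuf_header) within the first page, so copying even a
 * single full page of data spans two sf_buf-mapped pages and takes two
 * bcopy() calls.
 */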
/*
 * Copy bytes from an mbuf chain to the specified zbuf: copying will be
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * to pages, which may not be contiguously mapped in kernel address space.
 * As with bpf_zerocopy_append_bytes(), the caller is responsible for
 * checking that this will not exceed the buffer limit.
 */
void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, moffset, page, poffset;
	const struct mbuf *m;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_mbuf not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_mbuf: NULL buf"));

	m = (struct mbuf *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0,
	    ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_IMMUTABLE"));

	/*
	 * Scatter-gather both from an mbuf chain and to a user page set
	 * mapped into kernel address space using sf_bufs.  If we're lucky,
	 * each mbuf requires one copy operation, but if page alignment and
	 * mbuf alignment work out less well, we'll be doing two copies per
	 * mbuf.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	moffset = 0;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages,
		    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
		    "np)\n", page, zb->zb_numpages));
		KASSERT(m != NULL,
		    ("bpf_zerocopy_append_mbuf: end of mbuf chain"));

		count = min(m->m_len - moffset, len);
		count = min(count, PAGE_SIZE - poffset);
		bcopy(mtod(m, u_char *) + moffset,
		    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
		    count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
		    poffset));
		moffset += count;
		if (moffset == m->m_len) {
			m = m->m_next;
			moffset = 0;
		}
		len -= count;
	}
}

/*
 * Notification from the BPF framework that a buffer in the store position is
 * rejecting packets and may be considered full.  We mark the buffer as
 * immutable and assign it to userspace so that it is immediately available
 * for the user process to access.
 */
void
bpf_zerocopy_buffull(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buffull: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0) {
		zb->zb_flags |= ZBUF_FLAG_IMMUTABLE;
		zb->zb_header->bzh_kernel_len = d->bd_slen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}

/*
 * Notification from the BPF framework that a buffer has moved into the held
 * slot on a descriptor.  Zero-copy BPF will update the shared page to let
 * the user process know, and flag the buffer as immutable if it hasn't
 * already been marked immutable due to filling while it was in the store
 * position.
 *
 * Note: identical logic to that in bpf_zerocopy_buffull(), except that we
 * operate on bd_hbuf and bd_hlen.
 */
void
bpf_zerocopy_bufheld(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_bufheld: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_IMMUTABLE) == 0) {
		zb->zb_flags |= ZBUF_FLAG_IMMUTABLE;
		zb->zb_header->bzh_kernel_len = d->bd_hlen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}
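/*
 * A note on memory ordering (our reading of the atomics used above, which
 * come from machine/atomic.h): the atomic_add_rel_int() on bzh_kernel_gen
 * in the two functions above is a release operation, so the store of
 * bzh_kernel_len becomes visible to the user process no later than the new
 * generation number; it pairs with an acquire load of bzh_kernel_gen in
 * userspace and with the atomic_load_acq_int() of bzh_user_gen in
 * bpf_zerocopy_canfreebuf() below.
 */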
/*
 * Query from the BPF framework regarding whether the buffer currently in
 * the held position can be moved to the free position, which the user
 * process indicates by setting its generation number equal to the kernel
 * generation number.
 */
int
bpf_zerocopy_canfreebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canfreebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	if (zb == NULL)
		return (0);
	if (zb->zb_header->bzh_kernel_gen ==
	    atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
		return (1);
	return (0);
}

/*
 * Query from the BPF framework as to whether or not the buffer currently in
 * the store position can actually be written to.  This may return false if
 * the store buffer is assigned to userspace before the hold buffer is
 * acknowledged.
 */
int
bpf_zerocopy_canwritebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canwritebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));

	if (zb->zb_flags & ZBUF_FLAG_IMMUTABLE)
		return (0);
	return (1);
}

/*
 * Free zero-copy buffers at request of descriptor.
 */
void
bpf_zerocopy_free(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_free: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_hbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_fbuf;
	if (zb != NULL)
		zbuf_free(zb);
}

/*
 * Ioctl to return the maximum buffer size.
 */
int
bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_getzmax: not in zbuf mode"));

	*i = BPF_MAX_PAGES * PAGE_SIZE;
	return (0);
}

/*
 * Ioctl to force rotation of the two buffers, if there's any data available.
 * This can be used by user space to implement timeouts when waiting for a
 * buffer to fill.
 */
int
bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *bzh;

	bzero(bz, sizeof(*bz));
	BPFD_LOCK(d);
	if (d->bd_hbuf == NULL && d->bd_slen != 0) {
		ROTATE_BUFFERS(d);
		bzh = (struct zbuf *)d->bd_hbuf;
		bz->bz_bufa = (void *)bzh->zb_uaddr;
		bz->bz_buflen = d->bd_hlen;
	}
	BPFD_UNLOCK(d);
	return (0);
}
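/*
 * Illustrative userspace use of the rotation ioctl (a sketch only; the
 * ioctl and struct bpf_zbuf are declared in net/bpf.h, see bpf(4)): after
 * waiting too long for a buffer to fill, force any partial data into the
 * hold position and consume it:
 *
 *	struct bpf_zbuf bz;
 *	if (ioctl(fd, BIOCROTZBUF, &bz) == 0 && bz.bz_buflen != 0)
 *		(bz.bz_bufa is the user address of the buffer now held)
 */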
/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */
int
bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *zba, *zbb;
	int error;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_setzbuf: not in zbuf mode"));

	/*
	 * Must set both buffers.  Cannot clear them.
	 */
	if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
		return (EINVAL);

	/*
	 * Buffers must have a size greater than 0.  Alignment and other size
	 * validity checking is done in zbuf_setup().
	 */
	if (bz->bz_buflen == 0)
		return (EINVAL);

	/*
	 * Allocate new buffers.
	 */
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
	    &zba);
	if (error)
		return (error);
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
	    &zbb);
	if (error) {
		zbuf_free(zba);
		return (error);
	}

	/*
	 * We only allow buffers to be installed once, so atomically check
	 * that no buffers are currently installed and install new buffers.
	 */
	BPFD_LOCK(d);
	if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
	    d->bd_bif != NULL) {
		BPFD_UNLOCK(d);
		zbuf_free(zba);
		zbuf_free(zbb);
		return (EINVAL);
	}

	/*
	 * Point BPF descriptor at buffers; initialize sbuf as zba so that
	 * it is always filled first in the sequence, per bpf(4).
	 */
	d->bd_fbuf = (caddr_t)zbb;
	d->bd_sbuf = (caddr_t)zba;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	/*
	 * We expose only the space left in the buffer after the size of the
	 * shared management region.
	 */
	d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
	BPFD_UNLOCK(d);
	return (0);
}
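/*
 * Hypothetical userspace setup matching the checks above (a sketch per
 * bpf(4), not a definitive recipe; the ioctls and BPF_BUFMODE_ZBUF are
 * declared in net/bpf.h).  mmap(2) with MAP_ANON yields page-aligned
 * memory, satisfying zbuf_setup(); bz_buflen must be a nonzero multiple of
 * the page size no larger than BIOCGETZMAX reports:
 *
 *	u_int mode = BPF_BUFMODE_ZBUF;
 *	size_t zmax;
 *	struct bpf_zbuf bz;
 *
 *	ioctl(fd, BIOCSETBUFMODE, &mode);
 *	ioctl(fd, BIOCGETZMAX, &zmax);
 *	bz.bz_buflen = (desired_len < zmax) ? desired_len : zmax;
 *	bz.bz_bufa = mmap(NULL, bz.bz_buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	bz.bz_bufb = mmap(NULL, bz.bz_buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	ioctl(fd, BIOCSETZBUF, &bz);
 */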