/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Seccuris Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract to
 * Seccuris Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into the kernel address space using sf_bufs and used directly
 * by BPF.  Memory is wired since page faults cannot be tolerated in the
 * contexts where the buffers are copied to (locks held, interrupt context,
 * etc).  Access to shared memory buffers is synchronized using a header on
 * each buffer, allowing the number of system calls to go to zero as BPF
 * reaches saturation (buffers filled as fast as they can be drained by the
 * user process).  Full details of the protocol for communicating between
 * the user process and BPF may be found in bpf(4).
 */

/*
 * Maximum number of pages per buffer.  Since all BPF devices use two, the
 * maximum per device is 2*BPF_MAX_PAGES.  Resource limits on the number of
 * sf_bufs may be an issue, so do not set this too high.  On older systems,
 * kernel address space limits may also be an issue.
 */
#define BPF_MAX_PAGES   512
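
/*
 * Illustrative sketch of the user-space side of the setup described
 * above, using the bpf(4) zero-copy ioctls (BIOCSETBUFMODE, BIOCGETZMAX,
 * BIOCSETZBUF); the buffer size chosen below is an arbitrary example, and
 * error handling is omitted -- see bpf(4) for the authoritative protocol:
 *
 *      u_int bufmode = BPF_BUFMODE_ZBUF;
 *      size_t zmax;
 *      struct bpf_zbuf bz;
 *
 *      ioctl(fd, BIOCSETBUFMODE, &bufmode);    (select zero-copy mode)
 *      ioctl(fd, BIOCGETZMAX, &zmax);          (largest permitted size)
 *      bz.bz_buflen = 32 * PAGE_SIZE;          (page-aligned, <= zmax)
 *      bz.bz_bufa = mmap(NULL, bz.bz_buflen, PROT_READ | PROT_WRITE,
 *          MAP_ANON, -1, 0);
 *      bz.bz_bufb = mmap(NULL, bz.bz_buflen, PROT_READ | PROT_WRITE,
 *          MAP_ANON, -1, 0);
 *      ioctl(fd, BIOCSETZBUF, &bz);            (donate both buffers)
 */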

/*
 * struct zbuf describes a memory buffer loaned by a user process to the
 * kernel.  We represent this as a series of pages managed using an array
 * of sf_bufs.  Even though the memory is contiguous in user space, it may
 * not be mapped contiguously in the kernel (i.e., a set of physically
 * non-contiguous pages in the direct map region), so we must implement
 * scatter-gather copying.  One significant mitigating factor is that on
 * systems with a direct memory map, we can avoid TLB misses.
 *
 * At the front of the shared memory region is a bpf_zbuf_header, which
 * contains shared control data to allow user space and the kernel to
 * synchronize; this is included in zb_size, but not bd_bufsize, so that
 * BPF knows that the space is not available.
 */
struct zbuf {
        vm_offset_t     zb_uaddr;       /* User address at time of setup. */
        size_t          zb_size;        /* Size of buffer, incl. header. */
        u_int           zb_numpages;    /* Number of pages. */
        int             zb_flags;       /* Flags on zbuf. */
        struct sf_buf   **zb_pages;     /* Pages themselves. */
        struct bpf_zbuf_header *zb_header;      /* Shared header. */
};

/*
 * When a buffer has been assigned to userspace, flag it as such, as the
 * buffer may remain in the store position as a result of the user process
 * not yet having acknowledged the buffer in the hold position.
 */
#define ZBUF_FLAG_ASSIGNED      0x00000001      /* Set when owned by user. */

/*
 * Release a page we've previously wired.
 */
static void
zbuf_page_free(vm_page_t pp)
{

        vm_page_lock(pp);
        if (vm_page_unwire(pp, PQ_INACTIVE) && pp->object == NULL)
                vm_page_free(pp);
        vm_page_unlock(pp);
}

/*
 * Free an sf_buf with attached page.
 */
static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
        vm_page_t pp;

        pp = sf_buf_page(sf);
        sf_buf_free(sf);
        zbuf_page_free(pp);
}

/*
 * Free a zbuf, including its page array, sf_bufs, and pages.  Allow
 * partially allocated zbufs to be freed so that this may be used for
 * cleanup during zbuf setup.
 */
static void
zbuf_free(struct zbuf *zb)
{
        int i;

        for (i = 0; i < zb->zb_numpages; i++) {
                if (zb->zb_pages[i] != NULL)
                        zbuf_sfbuf_free(zb->zb_pages[i]);
        }
        free(zb->zb_pages, M_BPF);
        free(zb, M_BPF);
}

/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure
 * to deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
        struct sf_buf *sf;
        vm_page_t pp;

        if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE, VM_PROT_READ |
            VM_PROT_WRITE, &pp, 1) < 0)
                return (NULL);
        sf = sf_buf_alloc(pp, SFB_NOWAIT);
        if (sf == NULL) {
                zbuf_page_free(pp);
                return (NULL);
        }
        return (sf);
}

/*
 * Create a zbuf describing a range of user address space memory.  Validate
 * page alignment, size requirements, etc.
 */
static int
zbuf_setup(struct thread *td, vm_offset_t uaddr, size_t len,
    struct zbuf **zbp)
{
        struct zbuf *zb;
        struct vm_map *map;
        int error, i;

        *zbp = NULL;

        /*
         * User address must be page-aligned.
         */
        if (uaddr & PAGE_MASK)
                return (EINVAL);

        /*
         * Length must be an integer number of full pages.
         */
        if (len & PAGE_MASK)
                return (EINVAL);

        /*
         * Length must not exceed per-buffer resource limit.
         */
        if ((len / PAGE_SIZE) > BPF_MAX_PAGES)
                return (EINVAL);

        /*
         * Allocate the buffer and set up each page with its own sf_buf.
         */
        error = 0;
        zb = malloc(sizeof(*zb), M_BPF, M_ZERO | M_WAITOK);
        zb->zb_uaddr = uaddr;
        zb->zb_size = len;
        zb->zb_numpages = len / PAGE_SIZE;
        zb->zb_pages = malloc(sizeof(struct sf_buf *) *
            zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
        map = &td->td_proc->p_vmspace->vm_map;
        for (i = 0; i < zb->zb_numpages; i++) {
                zb->zb_pages[i] = zbuf_sfbuf_get(map,
                    uaddr + (i * PAGE_SIZE));
                if (zb->zb_pages[i] == NULL) {
                        error = EFAULT;
                        goto error;
                }
        }
        zb->zb_header =
            (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
        bzero(zb->zb_header, sizeof(*zb->zb_header));
        *zbp = zb;
        return (0);

error:
        zbuf_free(zb);
        return (error);
}
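
/*
 * Worked example of the validation above (an illustration only; the
 * checks in zbuf_setup() are the actual requirements): with 4KB pages, a
 * page-aligned 128KB buffer passes all three checks and yields
 * zb_numpages = 32.  Of the 131072 bytes, sizeof(struct bpf_zbuf_header)
 * at the front is reserved for synchronization, so only the remainder is
 * offered to BPF as packet space; see bpf_zerocopy_ioctl_setzbuf() below.
 */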

/*
 * Copy bytes from a source into the specified zbuf.  The caller is
 * responsible for performing bounds checking, etc.
 */
void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
        u_int count, page, poffset;
        u_char *src_bytes;
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_append_bytes: not in zbuf mode"));
        KASSERT(buf != NULL, ("bpf_zerocopy_append_bytes: NULL buf"));

        src_bytes = (u_char *)src;
        zb = (struct zbuf *)buf;

        KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
            ("bpf_zerocopy_append_bytes: ZBUF_FLAG_ASSIGNED"));

        /*
         * Scatter-gather copy to user pages mapped into kernel address
         * space using sf_bufs: copy up to a page at a time.
         */
        offset += sizeof(struct bpf_zbuf_header);
        page = offset / PAGE_SIZE;
        poffset = offset % PAGE_SIZE;
        while (len > 0) {
                KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
                    " page overflow (%d p %d np)\n", page, zb->zb_numpages));

                count = min(len, PAGE_SIZE - poffset);
                bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
                    poffset, count);
                poffset += count;
                if (poffset == PAGE_SIZE) {
                        poffset = 0;
                        page++;
                }
                KASSERT(poffset < PAGE_SIZE,
                    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
                    poffset));
                len -= count;
                src_bytes += count;
        }
}
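
/*
 * A worked example of the arithmetic above, assuming 4KB pages and a
 * hypothetical 32-byte bpf_zbuf_header: an append of 100 bytes at offset
 * 4080 becomes offset 4112 once the header is skipped, i.e. page 1,
 * poffset 16, so all 100 bytes land in a single bcopy().  An append of
 * 100 bytes at offset 4040 becomes offset 4072 (page 0, poffset 4072)
 * and is split into a 24-byte copy that finishes page 0 followed by a
 * 76-byte copy at the start of page 1.
 */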

/*
 * Copy bytes from an mbuf chain to the specified zbuf: copying will be
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * to pages, which may not be contiguously mapped in kernel address space.
 * As with bpf_zerocopy_append_bytes(), the caller is responsible for
 * checking that this will not exceed the buffer limit.
 */
void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
        u_int count, moffset, page, poffset;
        const struct mbuf *m;
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_append_mbuf: not in zbuf mode"));
        KASSERT(buf != NULL, ("bpf_zerocopy_append_mbuf: NULL buf"));

        m = (struct mbuf *)src;
        zb = (struct zbuf *)buf;

        KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
            ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_ASSIGNED"));

        /*
         * Scatter-gather both from an mbuf chain and to a user page set
         * mapped into kernel address space using sf_bufs.  If we're lucky,
         * each mbuf requires one copy operation, but if page alignment and
         * mbuf alignment work out less well, we'll be doing two copies per
         * mbuf.
         */
        offset += sizeof(struct bpf_zbuf_header);
        page = offset / PAGE_SIZE;
        poffset = offset % PAGE_SIZE;
        moffset = 0;
        while (len > 0) {
                KASSERT(page < zb->zb_numpages,
                    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
                    "np)\n", page, zb->zb_numpages));
                KASSERT(m != NULL,
                    ("bpf_zerocopy_append_mbuf: end of mbuf chain"));

                count = min(m->m_len - moffset, len);
                count = min(count, PAGE_SIZE - poffset);
                bcopy(mtod(m, u_char *) + moffset,
                    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
                    count);
                poffset += count;
                if (poffset == PAGE_SIZE) {
                        poffset = 0;
                        page++;
                }
                KASSERT(poffset < PAGE_SIZE,
                    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
                    poffset));
                moffset += count;
                if (moffset == m->m_len) {
                        m = m->m_next;
                        moffset = 0;
                }
                len -= count;
        }
}
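
/*
 * To illustrate the "two copies per mbuf" case above with hypothetical
 * numbers (4KB pages): a 1500-byte mbuf arriving when poffset is 3000 is
 * copied as 1096 bytes to finish the current page and then 404 bytes at
 * the start of the next page; only an mbuf that happens to fit within the
 * remainder of the current page is handled by a single bcopy().
 */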

/*
 * Notification from the BPF framework that a buffer in the store position
 * is rejecting packets and may be considered full.  We mark the buffer as
 * immutable and assign it to userspace so that it is immediately available
 * for the user process to access.
 */
void
bpf_zerocopy_buffull(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_buffull: not in zbuf mode"));

        zb = (struct zbuf *)d->bd_sbuf;
        KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));

        if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
                zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
                zb->zb_header->bzh_kernel_len = d->bd_slen;
                atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
        }
}

/*
 * Notification from the BPF framework that a buffer has moved into the
 * held slot on a descriptor.  Zero-copy BPF will update the shared page to
 * let the user process know, and flag the buffer as assigned if it hasn't
 * already been marked assigned due to filling while it was in the store
 * position.
 *
 * Note: identical logic as in bpf_zerocopy_buffull(), except that we
 * operate on bd_hbuf and bd_hlen.
 */
void
bpf_zerocopy_bufheld(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_bufheld: not in zbuf mode"));

        zb = (struct zbuf *)d->bd_hbuf;
        KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));

        if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
                zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
                zb->zb_header->bzh_kernel_len = d->bd_hlen;
                atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
        }
}

/*
 * Notification from the BPF framework that the free buffer has been
 * rotated out of the held position to the free position.  This happens
 * when the user acknowledges the held buffer.
 */
void
bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_buf_reclaimed: not in zbuf mode"));

        KASSERT(d->bd_fbuf != NULL,
            ("bpf_zerocopy_buf_reclaimed: NULL free buf"));
        zb = (struct zbuf *)d->bd_fbuf;
        zb->zb_flags &= ~ZBUF_FLAG_ASSIGNED;
}

/*
 * Query from the BPF framework regarding whether the buffer currently in
 * the held position can be moved to the free position, which is indicated
 * by the user process setting its generation number equal to the kernel
 * generation number.
 */
int
bpf_zerocopy_canfreebuf(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_canfreebuf: not in zbuf mode"));

        zb = (struct zbuf *)d->bd_hbuf;
        if (zb == NULL)
                return (0);
        if (zb->zb_header->bzh_kernel_gen ==
            atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
                return (1);
        return (0);
}
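
/*
 * Sketch of the matching user-space acknowledgment described in bpf(4)
 * (illustrative; "hdr" is assumed to point at the base of a donated
 * buffer):
 *
 *      struct bpf_zbuf_header *hdr = buffer;   (base of donated buffer)
 *      unsigned kgen;
 *
 *      kgen = atomic_load_acq_int(&hdr->bzh_kernel_gen);
 *      if (kgen != hdr->bzh_user_gen) {
 *              ... consume hdr->bzh_kernel_len bytes of packet data ...
 *              atomic_store_rel_int(&hdr->bzh_user_gen, kgen);
 *      }
 *
 * The release store is what makes bpf_zerocopy_canfreebuf() above return
 * non-zero, allowing the kernel to reclaim the buffer.
 */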

/*
 * Query from the BPF framework as to whether or not the buffer currently
 * in the store position can actually be written to.  This may return
 * false if the store buffer is assigned to userspace before the hold
 * buffer is acknowledged.
 */
int
bpf_zerocopy_canwritebuf(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_canwritebuf: not in zbuf mode"));

        zb = (struct zbuf *)d->bd_sbuf;
        KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));

        if (zb->zb_flags & ZBUF_FLAG_ASSIGNED)
                return (0);
        return (1);
}

/*
 * Free zero-copy buffers at request of descriptor.
 */
void
bpf_zerocopy_free(struct bpf_d *d)
{
        struct zbuf *zb;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_free: not in zbuf mode"));

        zb = (struct zbuf *)d->bd_sbuf;
        if (zb != NULL)
                zbuf_free(zb);
        zb = (struct zbuf *)d->bd_hbuf;
        if (zb != NULL)
                zbuf_free(zb);
        zb = (struct zbuf *)d->bd_fbuf;
        if (zb != NULL)
                zbuf_free(zb);
}

/*
 * Ioctl to return the maximum buffer size.
 */
int
bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_ioctl_getzmax: not in zbuf mode"));

        *i = BPF_MAX_PAGES * PAGE_SIZE;
        return (0);
}

/*
 * Ioctl to force rotation of the two buffers, if there's any data
 * available.  This can be used by user space to implement timeouts when
 * waiting for a buffer to fill.
 */
int
bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
        struct zbuf *bzh;

        bzero(bz, sizeof(*bz));
        BPFD_LOCK(d);
        if (d->bd_hbuf == NULL && d->bd_slen != 0) {
                ROTATE_BUFFERS(d);
                bzh = (struct zbuf *)d->bd_hbuf;
                bz->bz_bufa = (void *)bzh->zb_uaddr;
                bz->bz_buflen = d->bd_hlen;
        }
        BPFD_UNLOCK(d);
        return (0);
}

/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */
int
bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
        struct zbuf *zba, *zbb;
        int error;

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ("bpf_zerocopy_ioctl_setzbuf: not in zbuf mode"));

        /*
         * Must set both buffers.  Cannot clear them.
         */
        if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
                return (EINVAL);

        /*
         * Buffers must have a size greater than 0.  Alignment and other
         * size validity checking is done in zbuf_setup().
         */
        if (bz->bz_buflen == 0)
                return (EINVAL);

        /*
         * Allocate new buffers.
         */
        error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
            &zba);
        if (error)
                return (error);
        error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
            &zbb);
        if (error) {
                zbuf_free(zba);
                return (error);
        }

        /*
         * We only allow buffers to be installed once, so atomically check
         * that no buffers are currently installed and install the new
         * buffers.
         */
        BPFD_LOCK(d);
        if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
            d->bd_bif != NULL) {
                BPFD_UNLOCK(d);
                zbuf_free(zba);
                zbuf_free(zbb);
                return (EINVAL);
        }

        /*
         * Point the BPF descriptor at the buffers; initialize sbuf as zba
         * so that it is always filled first in the sequence, per bpf(4).
         */
        d->bd_fbuf = (caddr_t)zbb;
        d->bd_sbuf = (caddr_t)zba;
        d->bd_slen = 0;
        d->bd_hlen = 0;

        /*
         * We expose only the space left in the buffer after the size of
         * the shared management region.
         */
        d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
        BPFD_UNLOCK(d);
        return (0);
}
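
/*
 * Sketch of a user-space timeout built on the rotation ioctl above
 * (illustrative; assumes fd is a bpf(4) descriptor already in zero-copy
 * mode).  After select(2) or poll(2) on fd times out, the process may
 * force a partially filled store buffer into the hold position:
 *
 *      struct bpf_zbuf bz;
 *
 *      if (ioctl(fd, BIOCROTZBUF, &bz) == 0 && bz.bz_buflen != 0) {
 *              ... bz.bz_bufa names the buffer now in the hold position;
 *              consume bz.bz_buflen bytes, then acknowledge via the
 *              buffer's bpf_zbuf_header as shown earlier ...
 *      }
 */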