Lines matching "page - offset" in sys/net/bpf_zerocopy.c (FreeBSD)

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 * ...
 */

/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into kernel address space using sf_bufs and used directly
 * by BPF.  Memory is wired since page faults cannot be tolerated in the
 * ...
 * non-contiguous pages in the direct map region) so we must implement
 * scatter-gather copying.  One significant mitigating factor is that on
 * ...
 */
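
Before the kernel fragments, it may help to see the user-space half of the
donation.  A minimal sketch, assuming the bpf(4) zero-copy ioctls
(BIOCSETBUFMODE, BIOCGETZMAX, BIOCSETZBUF); the eight-page buffer size is an
arbitrary example and error handling is abbreviated:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <net/bpf.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
        struct bpf_zbuf bz;
        size_t buflen, zmax;
        u_int bufmode = BPF_BUFMODE_ZBUF;
        int fd;

        if ((fd = open("/dev/bpf", O_RDWR)) < 0)
                err(1, "open(/dev/bpf)");
        if (ioctl(fd, BIOCSETBUFMODE, &bufmode) < 0)
                err(1, "BIOCSETBUFMODE");
        if (ioctl(fd, BIOCGETZMAX, &zmax) < 0)
                err(1, "BIOCGETZMAX");

        /* Two page-aligned, page-multiple regions donated to the kernel. */
        buflen = 8 * (size_t)getpagesize();
        if (buflen > zmax)
                buflen = zmax;
        bz.bz_bufa = mmap(NULL, buflen, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        bz.bz_bufb = mmap(NULL, buflen, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        if (bz.bz_bufa == MAP_FAILED || bz.bz_bufb == MAP_FAILED)
                err(1, "mmap");
        bz.bz_buflen = buflen;
        if (ioctl(fd, BIOCSETZBUF, &bz) < 0)
                err(1, "BIOCSETZBUF");

        /* Next: attach with BIOCSETIF and consume buffers (see below). */
        close(fd);
        return (0);
}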

/*
 * Release a page we've previously wired.
 */

/*
 * Free an sf_buf with attached page.
 */
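
The bodies behind these two comments are not among the matched lines.  As a
hedged sketch only (assuming the vm_page_unwire(9), sf_buf_page(9), and
sf_buf_free(9) KPIs; the real file may differ in detail), the pair plausibly
looks like:

/* Hypothetical reconstruction -- consult the actual source. */
static void
zbuf_page_free(vm_page_t pp)
{

        vm_page_unwire(pp, PQ_INACTIVE);
}

static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
        vm_page_t pp;

        pp = sf_buf_page(sf);
        sf_buf_free(sf);
        zbuf_page_free(pp);
}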

/*
 * Free a zbuf, including its page array, sf_bufs, and pages.  Allow partially
 * ...
 */
In zbuf_free():

        for (i = 0; i < zb->zb_numpages; i++) {
                if (zb->zb_pages[i] != NULL)
                        zbuf_sfbuf_free(zb->zb_pages[i]);
        }
        free(zb->zb_pages, M_BPF);

/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * ...
 */
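
Its body is likewise elided by the match.  A sketch of the wire-and-map step
the comment describes, assuming vm_fault_quick_hold_pages(9) and
sf_buf_alloc(9); the SFB_NOWAIT flag is what makes "prefer failure" concrete:

/* Hypothetical reconstruction -- consult the actual source. */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
        struct sf_buf *sf;
        vm_page_t pp;

        /* Fault in and hold the user page with read/write access. */
        if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE,
            VM_PROT_READ | VM_PROT_WRITE, &pp, 1) < 0)
                return (NULL);
        /* Trade the hold for a wiring so the page cannot be paged out. */
        vm_page_wire(pp);
        vm_page_unhold_pages(&pp, 1);
        /* Map into kernel address space; fail rather than sleep. */
        sf = sf_buf_alloc(pp, SFB_NOWAIT);
        if (sf == NULL) {
                zbuf_page_free(pp);
                return (NULL);
        }
        return (sf);
}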

/*
 * ...
 * page alignment, size requirements, etc.
 */

In zbuf_setup():

        /*
         * User address must be page-aligned.
         */
        ...
        /*
         * Length must not exceed per-buffer resource limit.
         */
        ...
        /*
         * Allocate the buffer and set up each page with its own sf_buf.
         */
        ...
        zb->zb_uaddr = uaddr;
        zb->zb_size = len;
        zb->zb_numpages = len / PAGE_SIZE;
        zb->zb_pages = malloc(sizeof(struct sf_buf *) *
            zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
        map = &td->td_proc->p_vmspace->vm_map;
        for (i = 0; i < zb->zb_numpages; i++) {
                zb->zb_pages[i] = zbuf_sfbuf_get(map,
                    uaddr + (i * PAGE_SIZE));
                if (zb->zb_pages[i] == NULL) {
                        ...
                }
        }
        zb->zb_header =
            (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
        bzero(zb->zb_header, sizeof(*zb->zb_header));
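
Note the layout zbuf_setup() establishes: the shared bpf_zbuf_header occupies
the first bytes of the first donated page, and packet data begins immediately
after it.  This is why both append routines below start by adding
sizeof(struct bpf_zbuf_header) to every offset:

        +-----------------+---------------------------------------------+
        | bpf_zbuf_header | packet data ...                             |
        +-----------------+---------------------------------------------+
        page 0            ^ offsets shifted by sizeof(header)    page N-1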

static void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
        u_int count, page, poffset;
        ...

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ...);
        ...
        KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
            ...);

        /*
         * Scatter-gather copy to user pages mapped into kernel address space
         * using sf_bufs: copy up to a page at a time.
         */
        offset += sizeof(struct bpf_zbuf_header);
        page = offset / PAGE_SIZE;
        poffset = offset % PAGE_SIZE;
        while (len > 0) {
                KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
                    " page overflow (%d p %d np)\n", page, zb->zb_numpages));

                count = min(len, PAGE_SIZE - poffset);
                bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
                    poffset, count);
                poffset += count;
                if (poffset == PAGE_SIZE) {
                        poffset = 0;
                        page++;
                }
                KASSERT(poffset < PAGE_SIZE,
                    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
                    poffset));
                len -= count;
                ...
        }
}
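
The page/offset split above is easy to trace in isolation.  A small
userspace sketch of the same arithmetic, assuming 4 KiB pages and a 32-byte
bpf_zbuf_header (verify both with getpagesize() and sizeof on a real system):

#include <stdio.h>

#define PAGE_SIZE 4096          /* assumed page size */
#define BZH_SIZE  32            /* assumed sizeof(struct bpf_zbuf_header) */

int
main(void)
{
        unsigned int offset = 4000;     /* logical offset of this packet */
        unsigned int len = 128;         /* bytes to append */
        unsigned int page, poffset, count;

        offset += BZH_SIZE;     /* data starts after the shared header */
        page = offset / PAGE_SIZE;
        poffset = offset % PAGE_SIZE;
        while (len > 0) {
                count = len < PAGE_SIZE - poffset ?
                    len : PAGE_SIZE - poffset;
                printf("copy %u bytes into page %u at offset %u\n",
                    count, page, poffset);
                poffset += count;
                if (poffset == PAGE_SIZE) {
                        poffset = 0;
                        page++;
                }
                len -= count;
        }
        return (0);
}

For offset 4000 and len 128 this prints two copies: 64 bytes into page 0 at
offset 4032, then 64 bytes into page 1 at offset 0, i.e. the copy is split
exactly at the page boundary.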

/*
 * ...
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * ...
 */

static void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
        u_int count, moffset, page, poffset;
        ...

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ...);
        ...
        KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
            ...);

        /*
         * Scatter-gather both from an mbuf chain and to a user page set
         * mapped into kernel address space using sf_bufs.  If we're lucky,
         * each mbuf requires one copy operation, but if page alignment and
         * ...
         */
        offset += sizeof(struct bpf_zbuf_header);
        page = offset / PAGE_SIZE;
        poffset = offset % PAGE_SIZE;
        moffset = 0;
        while (len > 0) {
                KASSERT(page < zb->zb_numpages,
                    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
                    "np)\n", page, zb->zb_numpages));
                ...
                count = min(m->m_len - moffset, len);
                count = min(count, PAGE_SIZE - poffset);
                bcopy(mtod(m, u_char *) + moffset,
                    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
                    count);
                poffset += count;
                if (poffset == PAGE_SIZE) {
                        poffset = 0;
                        page++;
                }
                KASSERT(poffset < PAGE_SIZE,
                    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
                    poffset));
                moffset += count;
                if (moffset == m->m_len) {
                        m = m->m_next;
                        moffset = 0;
                }
                len -= count;
        }
}
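
A worked trace of the double clamp (again assuming 4 KiB pages): appending a
3000-byte mbuf starting at page offset 3500 first clamps count to the 596
bytes left in the page, then to the 2404 bytes left in the mbuf, so this
mbuf costs two bcopy() calls; that is the less-lucky two-copies-per-mbuf
case the comment above anticipates.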

In bpf_zerocopy_buffull():

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ...);
        zb = (struct zbuf *)d->bd_sbuf;
        ...
        if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
                zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
                zb->zb_header->bzh_kernel_len = d->bd_slen;
                atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
        }

/*
 * ...
 * slot on a descriptor.  Zero-copy BPF will update the shared page to let
 * ...
 */

In bpf_zerocopy_bufheld():

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ...);
        zb = (struct zbuf *)d->bd_hbuf;
        ...
        if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
                zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
                zb->zb_header->bzh_kernel_len = d->bd_hlen;
                atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
        }

In bpf_zerocopy_buf_reclaimed():

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ...);
        KASSERT(d->bd_fbuf != NULL,
            ...);
        zb = (struct zbuf *)d->bd_fbuf;
        zb->zb_flags &= ~ZBUF_FLAG_ASSIGNED;

In bpf_zerocopy_canfreebuf():

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ...);
        zb = (struct zbuf *)d->bd_hbuf;
        ...
        if (zb->zb_header->bzh_kernel_gen ==
            atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
                return (1);
        return (0);
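
The release-ordered increment in buffull/bufheld above pairs with this
acquire load across the user/kernel boundary.  On the user side the
conventional check-and-acknowledge looks like the following sketch
(atomic_load_acq_int() and atomic_store_rel_int() from <machine/atomic.h>
mirror the kernel's primitives):

#include <sys/types.h>
#include <machine/atomic.h>
#include <net/bpf.h>

/* The buffer belongs to userspace once the kernel generation advances. */
static int
buffer_check(struct bpf_zbuf_header *bzh)
{

        return (bzh->bzh_user_gen !=
            atomic_load_acq_int(&bzh->bzh_kernel_gen));
}

/* Hand the buffer back by catching the user generation up. */
static void
buffer_acknowledge(struct bpf_zbuf_header *bzh)
{

        atomic_store_rel_int(&bzh->bzh_user_gen, bzh->bzh_kernel_gen);
}

Once bzh_user_gen catches up, bpf_zerocopy_canfreebuf() above returns 1 and
the kernel may reclaim the buffer.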

In bpf_zerocopy_canwritebuf():

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ...);
        zb = (struct zbuf *)d->bd_sbuf;
        ...
        if (zb->zb_flags & ZBUF_FLAG_ASSIGNED)
                return (0);
        return (1);

In bpf_zerocopy_free():

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ...);
        zb = (struct zbuf *)d->bd_sbuf;
        if (zb != NULL)
                zbuf_free(zb);
        zb = (struct zbuf *)d->bd_hbuf;
        if (zb != NULL)
                zbuf_free(zb);
        zb = (struct zbuf *)d->bd_fbuf;
        if (zb != NULL)
                zbuf_free(zb);

In bpf_zerocopy_ioctl_getzmax():

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ...);

In bpf_zerocopy_ioctl_rotzbuf():

        if (d->bd_hbuf == NULL && d->bd_slen != 0) {
                ROTATE_BUFFERS(d);
                bzh = (struct zbuf *)d->bd_hbuf;
                bz->bz_bufa = (void *)bzh->zb_uaddr;
                bz->bz_buflen = d->bd_hlen;
        }
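
From userspace this path is reached with the BIOCROTZBUF ioctl, typically
after a poll(2) timeout while data sits in a partially filled store buffer.
A sketch continuing the setup example above (consume() is a hypothetical
callback):

#include <sys/ioctl.h>
#include <net/bpf.h>
#include <err.h>
#include <string.h>

extern void consume(void *buf, size_t len);     /* hypothetical */

static void
force_rotation(int fd)
{
        struct bpf_zbuf bz;

        memset(&bz, 0, sizeof(bz));
        if (ioctl(fd, BIOCROTZBUF, &bz) < 0)
                err(1, "BIOCROTZBUF");
        if (bz.bz_bufa != NULL)
                /* bz.bz_buflen bytes of packets start at bz.bz_bufa. */
                consume(bz.bz_bufa, bz.bz_buflen);
}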

/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */

In bpf_zerocopy_ioctl_setzbuf():

        KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
            ...);
        ...
        if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
                return (EINVAL);
        ...
        if (bz->bz_buflen == 0)
                return (EINVAL);
        ...
        error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
            &zba);
        ...
        error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
            &zbb);
        ...
        if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
            d->bd_bif != NULL) {
                ...
                return (EINVAL);
        }
        ...
        d->bd_fbuf = (caddr_t)zbb;
        d->bd_sbuf = (caddr_t)zba;
        d->bd_slen = 0;
        d->bd_hlen = 0;
        ...
        d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
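
As a concrete check of that last line: with bz_buflen = 65536 and a 32-byte
bpf_zbuf_header (its size on common configurations; verify with sizeof), the
descriptor advertises bd_bufsize = 65504 bytes of usable capture space per
buffer, since the shared management region is carved out of the donation.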