xref: /freebsd/sys/opencrypto/criov.c (revision 0333fad1b7e042eea0fe8348c6fde8ee55538d63)
1 /*      $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999 Theo de Raadt
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright
11  *   notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *   notice, this list of conditions and the following disclaimer in the
14  *   documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *   derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/proc.h>
36 #include <sys/errno.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/mbuf.h>
40 #include <sys/uio.h>
41 #include <sys/limits.h>
42 #include <sys/lock.h>
43 #include <sys/sdt.h>
44 
45 #include <machine/vmparam.h>
46 
47 #include <vm/vm.h>
48 #include <vm/vm_page.h>
49 #include <vm/pmap.h>
50 
51 #include <opencrypto/cryptodev.h>
52 
53 SDT_PROVIDER_DECLARE(opencrypto);
54 
55 /*
 56  * These macros exist only to avoid code duplication, as several functions
 57  * below need to skip a given number of bytes in the same way.
58  */
59 #define	CUIO_SKIP()	do {						\
60 	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
61 	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
62 	while (off > 0) {						\
63 		KASSERT(iol >= 0, ("%s: empty in skip", __func__));	\
64 		if (off < iov->iov_len)					\
65 			break;						\
66 		off -= iov->iov_len;					\
67 		iol--;							\
68 		iov++;							\
69 	}								\
70 } while (0)
71 
72 #define CVM_PAGE_SKIP()	do {					\
73 	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
74 	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
75 	while (off > 0) {						\
76 		if (off < PAGE_SIZE)					\
77 			break;						\
 78 		processed += PAGE_SIZE;					\
 79 		off -= PAGE_SIZE;					\
80 		pages++;						\
81 	}								\
82 } while (0)
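
/*
 * For example (an illustrative walk-through, not from the original sources):
 * with an iovec chain of lengths { 64, 128, 256 } and off = 100, CUIO_SKIP()
 * consumes the first iovec (off becomes 36) and stops with "iov" pointing at
 * the 128-byte entry, since the remaining offset now lies inside it.
 */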
83 
84 static void
85 cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
86 {
87 	struct iovec *iov = uio->uio_iov;
88 	int iol = uio->uio_iovcnt;
89 	unsigned count;
90 
91 	CUIO_SKIP();
92 	while (len > 0) {
93 		KASSERT(iol >= 0, ("%s: empty", __func__));
94 		count = min(iov->iov_len - off, len);
95 		bcopy(((caddr_t)iov->iov_base) + off, cp, count);
96 		len -= count;
97 		cp += count;
98 		off = 0;
99 		iol--;
100 		iov++;
101 	}
102 }
103 
104 static void
105 cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp)
106 {
107 	struct iovec *iov = uio->uio_iov;
108 	int iol = uio->uio_iovcnt;
109 	unsigned count;
110 
111 	CUIO_SKIP();
112 	while (len > 0) {
113 		KASSERT(iol >= 0, ("%s: empty", __func__));
114 		count = min(iov->iov_len - off, len);
115 		bcopy(cp, ((caddr_t)iov->iov_base) + off, count);
116 		len -= count;
117 		cp += count;
118 		off = 0;
119 		iol--;
120 		iov++;
121 	}
122 }
123 
124 /*
 125  * Return the index and offset of the given location in the iovec list.
126  */
127 static int
128 cuio_getptr(struct uio *uio, int loc, int *off)
129 {
130 	int ind, len;
131 
132 	ind = 0;
133 	while (loc >= 0 && ind < uio->uio_iovcnt) {
134 		len = uio->uio_iov[ind].iov_len;
135 		if (len > loc) {
 136 			*off = loc;
 137 			return (ind);
138 		}
139 		loc -= len;
140 		ind++;
141 	}
142 
143 	if (ind > 0 && loc == 0) {
144 		ind--;
145 		*off = uio->uio_iov[ind].iov_len;
146 		return (ind);
147 	}
148 
149 	return (-1);
150 }
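
/*
 * Worked example (illustrative only): for a uio whose iovecs have lengths
 * { 16, 32 }, cuio_getptr(uio, 20, &off) returns index 1 with off == 4, and
 * cuio_getptr(uio, 48, &off) returns index 1 with off == 32, i.e. the
 * position immediately past the last byte.  Any location beyond that
 * returns -1.
 */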
151 
152 #if CRYPTO_MAY_HAVE_VMPAGE
153 /*
154  * Apply function f to the data in a vm_page_t list starting "off" bytes from
155  * the beginning, continuing for "len" bytes.
156  */
157 static int
158 cvm_page_apply(vm_page_t *pages, int off, int len,
159     int (*f)(void *, const void *, u_int), void *arg)
160 {
161 	int processed = 0;
162 	unsigned count;
163 	int rval;
164 
165 	CVM_PAGE_SKIP();
166 	while (len > 0) {
167 		char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages));
168 		count = min(PAGE_SIZE - off, len);
169 		rval = (*f)(arg, kaddr + off, count);
170 		if (rval)
171 			return (rval);
172 		len -= count;
173 		processed += count;
174 		off = 0;
175 		pages++;
176 	}
177 	return (0);
178 }
179 
180 static inline void *
181 cvm_page_contiguous_segment(vm_page_t *pages, size_t skip, int len)
182 {
183 	if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE)
184 		return (NULL);
185 
186 	pages += (skip / PAGE_SIZE);
187 	skip -= rounddown(skip, PAGE_SIZE);
188 	return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip);
189 }
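
/*
 * For instance (illustrative only): a request for 16 bytes at
 * skip == PAGE_SIZE + 8 resolves to bytes 8-23 of the second page, while a
 * request for 16 bytes at skip == PAGE_SIZE - 8 would straddle a page
 * boundary and therefore returns NULL.
 */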
190 
191 /*
 192  * Copy len bytes of data from the pointer cp into the vm_page_t array,
 193  * skipping the first off bytes.  Return the number of bytes skipped and copied.
194  * Does not verify the length of the array.
195  */
196 static int
197 cvm_page_copyback(vm_page_t *pages, int off, int len, c_caddr_t cp)
198 {
199 	int processed = 0;
200 	unsigned count;
201 
202 	CVM_PAGE_SKIP();
203 	while (len > 0) {
204 		count = min(PAGE_SIZE - off, len);
205 		bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off,
206 		    count);
207 		len -= count;
208 		cp += count;
209 		processed += count;
210 		off = 0;
211 		pages++;
212 	}
213 	return (processed);
214 }
215 
216 /*
 217  * Copy len bytes of data from the vm_page_t array, skipping the first off
 218  * bytes, into the pointer cp.  Return the number of bytes skipped and copied.
219  * Does not verify the length of the array.
220  */
221 static int
222 cvm_page_copydata(vm_page_t *pages, int off, int len, caddr_t cp)
223 {
224 	int processed = 0;
225 	unsigned count;
226 
227 	CVM_PAGE_SKIP();
228 	while (len > 0) {
229 		count = min(PAGE_SIZE - off, len);
230 		bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off), cp,
231 		    count);
232 		len -= count;
233 		cp += count;
234 		processed += count;
235 		off = 0;
236 		pages++;
237 	}
 238 	return (processed);
239 }
240 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
241 
242 /*
 243  * Given a starting page in an M_EXTPG mbuf, determine the length of the
 244  * physically contiguous segment that begins at that page.
245  */
246 static __inline size_t
247 m_epg_pages_extent(struct mbuf *m, int idx, u_int pglen)
248 {
249 	size_t len;
250 	u_int i;
251 
252 	len = pglen;
253 	for (i = idx + 1; i < m->m_epg_npgs; i++) {
254 		if (m->m_epg_pa[i - 1] + PAGE_SIZE != m->m_epg_pa[i])
255 			break;
256 		len += m_epg_pagelen(m, i, 0);
257 	}
258 	return (len);
259 }
260 
261 static void *
262 m_epg_segment(struct mbuf *m, size_t offset, size_t *len)
263 {
264 	u_int i, pglen, pgoff;
265 
266 	offset += mtod(m, vm_offset_t);
267 	if (offset < m->m_epg_hdrlen) {
268 		*len = m->m_epg_hdrlen - offset;
269 		return (m->m_epg_hdr + offset);
270 	}
271 	offset -= m->m_epg_hdrlen;
272 	pgoff = m->m_epg_1st_off;
273 	for (i = 0; i < m->m_epg_npgs; i++) {
274 		pglen = m_epg_pagelen(m, i, pgoff);
275 		if (offset < pglen) {
276 			*len = m_epg_pages_extent(m, i, pglen) - offset;
277 			return ((void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff +
278 			    offset));
279 		}
280 		offset -= pglen;
281 		pgoff = 0;
282 	}
283 	KASSERT(offset <= m->m_epg_trllen, ("%s: offset beyond trailer",
284 	    __func__));
285 	*len = m->m_epg_trllen - offset;
286 	return (m->m_epg_trail + offset);
287 }
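
/*
 * Illustrative example (assumes a zero m_data offset, m_epg_1st_off == 0 and
 * both pages fully used): for an M_EXTPG mbuf with a 16-byte header, two
 * physically non-adjacent pages and an 8-byte trailer, m_epg_segment() maps
 * offset 16 to the start of the first page with *len == PAGE_SIZE, and
 * offset 16 + 2 * PAGE_SIZE to the start of the trailer with *len == 8.
 */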
288 
289 static __inline void *
290 m_epg_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
291 {
292 	void *base;
293 	size_t seglen;
294 
295 	base = m_epg_segment(m, skip, &seglen);
296 	if (len > seglen)
297 		return (NULL);
298 	return (base);
299 }
300 
301 void
302 crypto_cursor_init(struct crypto_buffer_cursor *cc,
303     const struct crypto_buffer *cb)
304 {
305 	memset(cc, 0, sizeof(*cc));
306 	cc->cc_type = cb->cb_type;
307 	switch (cc->cc_type) {
308 	case CRYPTO_BUF_CONTIG:
309 		cc->cc_buf = cb->cb_buf;
310 		cc->cc_buf_len = cb->cb_buf_len;
311 		break;
312 	case CRYPTO_BUF_MBUF:
313 	case CRYPTO_BUF_SINGLE_MBUF:
314 		cc->cc_mbuf = cb->cb_mbuf;
315 		break;
316 	case CRYPTO_BUF_VMPAGE:
317 		cc->cc_vmpage = cb->cb_vm_page;
318 		cc->cc_buf_len = cb->cb_vm_page_len;
319 		cc->cc_offset = cb->cb_vm_page_offset;
320 		break;
321 	case CRYPTO_BUF_UIO:
322 		cc->cc_iov = cb->cb_uio->uio_iov;
323 		break;
324 	default:
325 #ifdef INVARIANTS
326 		panic("%s: invalid buffer type %d", __func__, cb->cb_type);
327 #endif
328 		break;
329 	}
330 }
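
/*
 * Typical use (a minimal illustrative sketch, not code from this file): a
 * driver walking the payload region of a request might pair the cursor
 * routines below along the lines of the following, where "process" is a
 * hypothetical per-segment consumer:
 *
 *	struct crypto_buffer_cursor cc;
 *	char *seg;
 *	size_t seglen, resid;
 *
 *	crypto_cursor_init(&cc, &crp->crp_buf);
 *	crypto_cursor_advance(&cc, crp->crp_payload_start);
 *	resid = crp->crp_payload_length;
 *	while (resid > 0) {
 *		seg = crypto_cursor_segment(&cc, &seglen);
 *		seglen = MIN(seglen, resid);
 *		process(seg, seglen);
 *		crypto_cursor_advance(&cc, seglen);
 *		resid -= seglen;
 *	}
 */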
331 
332 SDT_PROBE_DEFINE2(opencrypto, criov, cursor_advance, vmpage, "struct crypto_buffer_cursor*", "size_t");
333 
334 void
335 crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount)
336 {
337 	size_t remain;
338 
339 	switch (cc->cc_type) {
340 	case CRYPTO_BUF_CONTIG:
341 		MPASS(cc->cc_buf_len >= amount);
342 		cc->cc_buf += amount;
343 		cc->cc_buf_len -= amount;
344 		break;
345 	case CRYPTO_BUF_MBUF:
346 		for (;;) {
347 			remain = cc->cc_mbuf->m_len - cc->cc_offset;
348 			if (amount < remain) {
349 				cc->cc_offset += amount;
350 				break;
351 			}
352 			amount -= remain;
353 			cc->cc_mbuf = cc->cc_mbuf->m_next;
354 			cc->cc_offset = 0;
355 			if (amount == 0)
356 				break;
357 		}
358 		break;
359 	case CRYPTO_BUF_SINGLE_MBUF:
360 		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + amount);
361 		cc->cc_offset += amount;
362 		break;
363 	case CRYPTO_BUF_VMPAGE:
364 		for (;;) {
365 			SDT_PROBE2(opencrypto, criov, cursor_advance, vmpage,
366 			    cc, amount);
367 			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
368 			if (amount < remain) {
369 				cc->cc_buf_len -= amount;
370 				cc->cc_offset += amount;
371 				break;
372 			}
373 			cc->cc_buf_len -= remain;
374 			amount -= remain;
375 			cc->cc_vmpage++;
376 			cc->cc_offset = 0;
377 			if (amount == 0 || cc->cc_buf_len == 0)
378 				break;
379 		}
380 		break;
381 	case CRYPTO_BUF_UIO:
382 		for (;;) {
383 			remain = cc->cc_iov->iov_len - cc->cc_offset;
384 			if (amount < remain) {
385 				cc->cc_offset += amount;
386 				break;
387 			}
388 			amount -= remain;
389 			cc->cc_iov++;
390 			cc->cc_offset = 0;
391 			if (amount == 0)
392 				break;
393 		}
394 		break;
395 	default:
396 #ifdef INVARIANTS
397 		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
398 #endif
399 		break;
400 	}
401 }
402 
403 void *
404 crypto_cursor_segment(struct crypto_buffer_cursor *cc, size_t *len)
405 {
406 	switch (cc->cc_type) {
407 	case CRYPTO_BUF_CONTIG:
408 		*len = cc->cc_buf_len;
409 		return (cc->cc_buf);
410 	case CRYPTO_BUF_MBUF:
411 	case CRYPTO_BUF_SINGLE_MBUF:
412 		if (cc->cc_mbuf == NULL) {
413 			*len = 0;
414 			return (NULL);
415 		}
416 		if (cc->cc_mbuf->m_flags & M_EXTPG)
417 			return (m_epg_segment(cc->cc_mbuf, cc->cc_offset, len));
418 		*len = cc->cc_mbuf->m_len - cc->cc_offset;
419 		return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
420 	case CRYPTO_BUF_VMPAGE:
421 		*len = PAGE_SIZE - cc->cc_offset;
422 		return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
423 		    *cc->cc_vmpage)) + cc->cc_offset);
424 	case CRYPTO_BUF_UIO:
425 		*len = cc->cc_iov->iov_len - cc->cc_offset;
426 		return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
427 	default:
428 #ifdef INVARIANTS
429 		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
430 #endif
431 		*len = 0;
432 		return (NULL);
433 	}
434 }
435 
436 void *
437 crypto_cursor_segbase(struct crypto_buffer_cursor *cc)
438 {
439 	size_t len;
440 
441 	return (crypto_cursor_segment(cc, &len));
442 }
443 
444 size_t
445 crypto_cursor_seglen(struct crypto_buffer_cursor *cc)
446 {
447 	size_t len;
448 
449 	crypto_cursor_segment(cc, &len);
450 	return (len);
451 }
452 
453 void
454 crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
455     const void *vsrc)
456 {
457 	size_t remain, todo;
458 	const char *src;
459 	char *dst;
460 
461 	src = vsrc;
462 	switch (cc->cc_type) {
463 	case CRYPTO_BUF_CONTIG:
464 		MPASS(cc->cc_buf_len >= size);
465 		memcpy(cc->cc_buf, src, size);
466 		cc->cc_buf += size;
467 		cc->cc_buf_len -= size;
468 		break;
469 	case CRYPTO_BUF_MBUF:
470 		for (;;) {
471 			/*
472 			 * This uses m_copyback() for individual
473 			 * mbufs so that cc_mbuf and cc_offset are
474 			 * updated.
475 			 */
476 			remain = cc->cc_mbuf->m_len - cc->cc_offset;
477 			todo = MIN(remain, size);
478 			m_copyback(cc->cc_mbuf, cc->cc_offset, todo, src);
479 			src += todo;
480 			if (todo < remain) {
481 				cc->cc_offset += todo;
482 				break;
483 			}
484 			size -= todo;
485 			cc->cc_mbuf = cc->cc_mbuf->m_next;
486 			cc->cc_offset = 0;
487 			if (size == 0)
488 				break;
489 		}
490 		break;
491 	case CRYPTO_BUF_SINGLE_MBUF:
492 		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
493 		m_copyback(cc->cc_mbuf, cc->cc_offset, size, src);
494 		cc->cc_offset += size;
495 		break;
496 	case CRYPTO_BUF_VMPAGE:
497 		for (;;) {
498 			dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
499 			    *cc->cc_vmpage)) + cc->cc_offset;
500 			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
501 			todo = MIN(remain, size);
502 			memcpy(dst, src, todo);
503 			src += todo;
504 			cc->cc_buf_len -= todo;
505 			if (todo < remain) {
506 				cc->cc_offset += todo;
507 				break;
508 			}
509 			size -= todo;
510 			cc->cc_vmpage++;
511 			cc->cc_offset = 0;
512 			if (size == 0)
513 				break;
514 		}
515 		break;
516 	case CRYPTO_BUF_UIO:
517 		for (;;) {
518 			dst = (char *)cc->cc_iov->iov_base + cc->cc_offset;
519 			remain = cc->cc_iov->iov_len - cc->cc_offset;
520 			todo = MIN(remain, size);
521 			memcpy(dst, src, todo);
522 			src += todo;
523 			if (todo < remain) {
524 				cc->cc_offset += todo;
525 				break;
526 			}
527 			size -= todo;
528 			cc->cc_iov++;
529 			cc->cc_offset = 0;
530 			if (size == 0)
531 				break;
532 		}
533 		break;
534 	default:
535 #ifdef INVARIANTS
536 		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
537 #endif
538 		break;
539 	}
540 }
541 
542 void
543 crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst)
544 {
545 	size_t remain, todo;
546 	const char *src;
547 	char *dst;
548 
549 	dst = vdst;
550 	switch (cc->cc_type) {
551 	case CRYPTO_BUF_CONTIG:
552 		MPASS(cc->cc_buf_len >= size);
553 		memcpy(dst, cc->cc_buf, size);
554 		cc->cc_buf += size;
555 		cc->cc_buf_len -= size;
556 		break;
557 	case CRYPTO_BUF_MBUF:
558 		for (;;) {
559 			/*
560 			 * This uses m_copydata() for individual
561 			 * mbufs so that cc_mbuf and cc_offset are
562 			 * updated.
563 			 */
564 			remain = cc->cc_mbuf->m_len - cc->cc_offset;
565 			todo = MIN(remain, size);
566 			m_copydata(cc->cc_mbuf, cc->cc_offset, todo, dst);
567 			dst += todo;
568 			if (todo < remain) {
569 				cc->cc_offset += todo;
570 				break;
571 			}
572 			size -= todo;
573 			cc->cc_mbuf = cc->cc_mbuf->m_next;
574 			cc->cc_offset = 0;
575 			if (size == 0)
576 				break;
577 		}
578 		break;
579 	case CRYPTO_BUF_SINGLE_MBUF:
580 		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
581 		m_copydata(cc->cc_mbuf, cc->cc_offset, size, dst);
582 		cc->cc_offset += size;
583 		break;
584 	case CRYPTO_BUF_VMPAGE:
585 		for (;;) {
586 			src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
587 			    *cc->cc_vmpage)) + cc->cc_offset;
588 			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
589 			todo = MIN(remain, size);
590 			memcpy(dst, src, todo);
 591 			dst += todo;
592 			cc->cc_buf_len -= todo;
593 			if (todo < remain) {
594 				cc->cc_offset += todo;
595 				break;
596 			}
597 			size -= todo;
598 			cc->cc_vmpage++;
599 			cc->cc_offset = 0;
600 			if (size == 0)
601 				break;
602 		}
603 		break;
604 	case CRYPTO_BUF_UIO:
605 		for (;;) {
606 			src = (const char *)cc->cc_iov->iov_base +
607 			    cc->cc_offset;
608 			remain = cc->cc_iov->iov_len - cc->cc_offset;
609 			todo = MIN(remain, size);
610 			memcpy(dst, src, todo);
611 			dst += todo;
612 			if (todo < remain) {
613 				cc->cc_offset += todo;
614 				break;
615 			}
616 			size -= todo;
617 			cc->cc_iov++;
618 			cc->cc_offset = 0;
619 			if (size == 0)
620 				break;
621 		}
622 		break;
623 	default:
624 #ifdef INVARIANTS
625 		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
626 #endif
627 		break;
628 	}
629 }
630 
631 /*
 632  * To avoid advancing the caller's cursor, make a local copy that gets
 633  * advanced instead.
634  */
635 void
636 crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
637     void *vdst)
638 {
639 	struct crypto_buffer_cursor copy;
640 
641 	copy = *cc;
642 	crypto_cursor_copydata(&copy, size, vdst);
643 }
644 
645 /*
646  * Apply function f to the data in an iovec list starting "off" bytes from
647  * the beginning, continuing for "len" bytes.
648  */
649 static int
650 cuio_apply(struct uio *uio, int off, int len,
651     int (*f)(void *, const void *, u_int), void *arg)
652 {
653 	struct iovec *iov = uio->uio_iov;
654 	int iol = uio->uio_iovcnt;
655 	unsigned count;
656 	int rval;
657 
658 	CUIO_SKIP();
659 	while (len > 0) {
660 		KASSERT(iol >= 0, ("%s: empty", __func__));
661 		count = min(iov->iov_len - off, len);
662 		rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count);
663 		if (rval)
664 			return (rval);
665 		len -= count;
666 		off = 0;
667 		iol--;
668 		iov++;
669 	}
670 	return (0);
671 }
672 
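/*
 * Copy "size" bytes from "src" into the request's buffer starting "off" bytes
 * in, using the separate output buffer (crp_obuf) when one is supplied and
 * the primary buffer otherwise.
 */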
673 void
674 crypto_copyback(struct cryptop *crp, int off, int size, const void *src)
675 {
676 	struct crypto_buffer *cb;
677 
678 	if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE)
679 		cb = &crp->crp_obuf;
680 	else
681 		cb = &crp->crp_buf;
682 	switch (cb->cb_type) {
683 	case CRYPTO_BUF_MBUF:
684 	case CRYPTO_BUF_SINGLE_MBUF:
685 		m_copyback(cb->cb_mbuf, off, size, src);
686 		break;
687 #if CRYPTO_MAY_HAVE_VMPAGE
688 	case CRYPTO_BUF_VMPAGE:
689 		MPASS(size <= cb->cb_vm_page_len);
690 		MPASS(size + off <=
691 		    cb->cb_vm_page_len + cb->cb_vm_page_offset);
692 		cvm_page_copyback(cb->cb_vm_page,
693 		    off + cb->cb_vm_page_offset, size, src);
694 		break;
695 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
696 	case CRYPTO_BUF_UIO:
697 		cuio_copyback(cb->cb_uio, off, size, src);
698 		break;
699 	case CRYPTO_BUF_CONTIG:
700 		MPASS(off + size <= cb->cb_buf_len);
701 		bcopy(src, cb->cb_buf + off, size);
702 		break;
703 	default:
704 #ifdef INVARIANTS
705 		panic("invalid crp buf type %d", cb->cb_type);
706 #endif
707 		break;
708 	}
709 }
710 
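/*
 * Copy "size" bytes out of the request's primary buffer, starting "off" bytes
 * in, into "dst".
 */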
711 void
712 crypto_copydata(struct cryptop *crp, int off, int size, void *dst)
713 {
714 
715 	switch (crp->crp_buf.cb_type) {
716 	case CRYPTO_BUF_MBUF:
717 	case CRYPTO_BUF_SINGLE_MBUF:
718 		m_copydata(crp->crp_buf.cb_mbuf, off, size, dst);
719 		break;
720 #if CRYPTO_MAY_HAVE_VMPAGE
721 	case CRYPTO_BUF_VMPAGE:
722 		MPASS(size <= crp->crp_buf.cb_vm_page_len);
723 		MPASS(size + off <= crp->crp_buf.cb_vm_page_len +
724 		    crp->crp_buf.cb_vm_page_offset);
725 		cvm_page_copydata(crp->crp_buf.cb_vm_page,
726 		    off + crp->crp_buf.cb_vm_page_offset, size, dst);
727 		break;
728 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
729 	case CRYPTO_BUF_UIO:
730 		cuio_copydata(crp->crp_buf.cb_uio, off, size, dst);
731 		break;
732 	case CRYPTO_BUF_CONTIG:
733 		MPASS(off + size <= crp->crp_buf.cb_buf_len);
734 		bcopy(crp->crp_buf.cb_buf + off, dst, size);
735 		break;
736 	default:
737 #ifdef INVARIANTS
738 		panic("invalid crp buf type %d", crp->crp_buf.cb_type);
739 #endif
740 		break;
741 	}
742 }
743 
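/*
 * Apply "f" to each contiguous region of the buffer covering the "len" bytes
 * that start "off" bytes in.  Iteration stops as soon as "f" returns a
 * non-zero value, which is then returned to the caller; otherwise 0 is
 * returned.
 */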
744 int
745 crypto_apply_buf(struct crypto_buffer *cb, int off, int len,
746     int (*f)(void *, const void *, u_int), void *arg)
747 {
748 	int error;
749 
750 	switch (cb->cb_type) {
751 	case CRYPTO_BUF_MBUF:
752 	case CRYPTO_BUF_SINGLE_MBUF:
753 		error = m_apply(cb->cb_mbuf, off, len,
754 		    (int (*)(void *, void *, u_int))f, arg);
755 		break;
756 	case CRYPTO_BUF_UIO:
757 		error = cuio_apply(cb->cb_uio, off, len, f, arg);
758 		break;
759 #if CRYPTO_MAY_HAVE_VMPAGE
760 	case CRYPTO_BUF_VMPAGE:
761 		error = cvm_page_apply(cb->cb_vm_page,
762 		    off + cb->cb_vm_page_offset, len, f, arg);
763 		break;
764 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
765 	case CRYPTO_BUF_CONTIG:
766 		MPASS(off + len <= cb->cb_buf_len);
767 		error = (*f)(arg, cb->cb_buf + off, len);
768 		break;
769 	default:
770 #ifdef INVARIANTS
771 		panic("invalid crypto buf type %d", cb->cb_type);
772 #endif
773 		error = 0;
774 		break;
775 	}
776 	return (error);
777 }
778 
779 int
780 crypto_apply(struct cryptop *crp, int off, int len,
781     int (*f)(void *, const void *, u_int), void *arg)
782 {
783 	return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg));
784 }
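
/*
 * Example callback (an illustrative sketch only; "my_hash_update" and "ctx"
 * are hypothetical): a driver hashing the payload without copying it could
 * use something like
 *
 *	static int
 *	hash_segment(void *arg, const void *data, u_int len)
 *	{
 *		return (my_hash_update(arg, data, len));
 *	}
 *
 *	error = crypto_apply(crp, crp->crp_payload_start,
 *	    crp->crp_payload_length, hash_segment, &ctx);
 */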
785 
786 static inline void *
787 m_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
788 {
789 	int rel_off;
790 
791 	MPASS(skip <= INT_MAX);
792 
793 	m = m_getptr(m, (int)skip, &rel_off);
794 	if (m == NULL)
795 		return (NULL);
796 
797 	MPASS(rel_off >= 0);
798 	skip = rel_off;
799 	if (skip + len > m->m_len)
800 		return (NULL);
801 
802 	if (m->m_flags & M_EXTPG)
803 		return (m_epg_contiguous_subsegment(m, skip, len));
 804 	return (mtod(m, char *) + skip);
805 }
806 
807 static inline void *
808 cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len)
809 {
810 	int rel_off, idx;
811 
812 	MPASS(skip <= INT_MAX);
813 	idx = cuio_getptr(uio, (int)skip, &rel_off);
814 	if (idx < 0)
815 		return (NULL);
816 
817 	MPASS(rel_off >= 0);
818 	skip = rel_off;
819 	if (skip + len > uio->uio_iov[idx].iov_len)
820 		return (NULL);
821 	return ((char *)uio->uio_iov[idx].iov_base + skip);
822 }
823 
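/*
 * Return a pointer to a contiguous mapping of "len" bytes starting "skip"
 * bytes into the buffer, or NULL if the requested range does not lie within
 * a single contiguous segment.
 */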
824 void *
825 crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip,
826     size_t len)
827 {
828 
829 	switch (cb->cb_type) {
830 	case CRYPTO_BUF_MBUF:
831 	case CRYPTO_BUF_SINGLE_MBUF:
832 		return (m_contiguous_subsegment(cb->cb_mbuf, skip, len));
833 	case CRYPTO_BUF_UIO:
834 		return (cuio_contiguous_segment(cb->cb_uio, skip, len));
835 #if CRYPTO_MAY_HAVE_VMPAGE
836 	case CRYPTO_BUF_VMPAGE:
837 		MPASS(skip + len <= cb->cb_vm_page_len);
838 		return (cvm_page_contiguous_segment(cb->cb_vm_page,
839 		    skip + cb->cb_vm_page_offset, len));
840 #endif /* CRYPTO_MAY_HAVE_VMPAGE */
841 	case CRYPTO_BUF_CONTIG:
842 		MPASS(skip + len <= cb->cb_buf_len);
843 		return (cb->cb_buf + skip);
844 	default:
845 #ifdef INVARIANTS
846 		panic("invalid crp buf type %d", cb->cb_type);
847 #endif
848 		return (NULL);
849 	}
850 }
851 
852 void *
853 crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len)
854 {
855 	return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len));
856 }
857