/*	$OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/

/*-
 * Copyright (c) 1999 Theo de Raadt
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/sdt.h>

#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <opencrypto/cryptodev.h>

SDT_PROVIDER_DECLARE(opencrypto);

/*
 * These macros exist only to avoid code duplication, as we need to skip
 * a given number of bytes in the same way in several functions below.
 */
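/*
 * Both macros operate on local variables of the function that expands
 * them: CUIO_SKIP() expects "iov", "iol", "off" and "len" to be in scope,
 * while CVM_PAGE_SKIP() expects "pages", "processed", "off" and "len".
 */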
#define	CUIO_SKIP()	do {						\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
	while (off > 0) {						\
		KASSERT(iol >= 0, ("%s: empty in skip", __func__));	\
		if (off < iov->iov_len)					\
			break;						\
		off -= iov->iov_len;					\
		iol--;							\
		iov++;							\
	}								\
} while (0)

#define	CVM_PAGE_SKIP()	do {						\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
	while (off > 0) {						\
		if (off < PAGE_SIZE)					\
			break;						\
		processed += PAGE_SIZE;					\
		off -= PAGE_SIZE;					\
		pages++;						\
	}								\
} while (0)

static void
cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		bcopy(((caddr_t)iov->iov_base) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		iol--;
		iov++;
	}
}

static void
cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		bcopy(cp, ((caddr_t)iov->iov_base) + off, count);
		len -= count;
		cp += count;
		off = 0;
		iol--;
		iov++;
	}
}

/*
 * Return the index and offset of location in iovec list.
 */
static int
cuio_getptr(struct uio *uio, int loc, int *off)
{
	int ind, len;

	ind = 0;
	while (loc >= 0 && ind < uio->uio_iovcnt) {
		len = uio->uio_iov[ind].iov_len;
		if (len > loc) {
			*off = loc;
			return (ind);
		}
		loc -= len;
		ind++;
	}

	if (ind > 0 && loc == 0) {
		ind--;
		*off = uio->uio_iov[ind].iov_len;
		return (ind);
	}

	return (-1);
}

#if CRYPTO_MAY_HAVE_VMPAGE
/*
 * Apply function f to the data in a vm_page_t list starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
static int
cvm_page_apply(vm_page_t *pages, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	int processed __unused;
	unsigned count;
	int rval;

	processed = 0;
	CVM_PAGE_SKIP();
	while (len > 0) {
		char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages));
		count = min(PAGE_SIZE - off, len);
		rval = (*f)(arg, kaddr + off, count);
		if (rval)
			return (rval);
		len -= count;
		processed += count;
		off = 0;
		pages++;
	}
	return (0);
}

/*
 * Return a direct-map pointer covering "len" bytes starting "skip" bytes
 * into the vm_page_t array, or NULL if the range crosses a page boundary.
 */
static inline void *
cvm_page_contiguous_segment(vm_page_t *pages, size_t skip, int len)
{
	if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE)
		return (NULL);

	pages += (skip / PAGE_SIZE);
	skip -= rounddown(skip, PAGE_SIZE);
	return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip);
}

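/*
 * Worked example for the boundary check above (illustrative only, assuming
 * 4K pages): with skip = 4092 and len = 8 the first byte lies on page 0 but
 * the last byte lies on page 1, so (skip + len - 1) / PAGE_SIZE (= 1) is
 * greater than skip / PAGE_SIZE (= 0) and the range is rejected as
 * non-contiguous.
 */
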
/*
 * Copy len bytes of data from the pointer cp into the vm_page_t array,
 * skipping the first off bytes.  Returns the number of bytes skipped and
 * copied.  Does not verify the length of the array.
 */
static int
cvm_page_copyback(vm_page_t *pages, int off, int len, c_caddr_t cp)
{
	int processed = 0;
	unsigned count;

	CVM_PAGE_SKIP();
	while (len > 0) {
		count = min(PAGE_SIZE - off, len);
		bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off,
		    count);
		len -= count;
		cp += count;
		processed += count;
		off = 0;
		pages++;
	}
	return (processed);
}

/*
 * Copy len bytes of data from the vm_page_t array, skipping the first off
 * bytes, into the pointer cp.  Returns the number of bytes skipped and
 * copied.  Does not verify the length of the array.
 */
static int
cvm_page_copydata(vm_page_t *pages, int off, int len, caddr_t cp)
{
	int processed = 0;
	unsigned count;

	CVM_PAGE_SKIP();
	while (len > 0) {
		count = min(PAGE_SIZE - off, len);
		bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off), cp,
		    count);
		len -= count;
		cp += count;
		processed += count;
		off = 0;
		pages++;
	}
	return (processed);
}
#endif /* CRYPTO_MAY_HAVE_VMPAGE */

/*
 * Given a starting page in an m_epg, determine the length of the
 * current physically contiguous segment.
 */
static __inline size_t
m_epg_pages_extent(struct mbuf *m, int idx, u_int pglen)
{
	size_t len;
	u_int i;

	len = pglen;
	for (i = idx + 1; i < m->m_epg_npgs; i++) {
		if (m->m_epg_pa[i - 1] + PAGE_SIZE != m->m_epg_pa[i])
			break;
		len += m_epg_pagelen(m, i, 0);
	}
	return (len);
}

static void *
m_epg_segment(struct mbuf *m, size_t offset, size_t *len)
{
	u_int i, pglen, pgoff;

	offset += mtod(m, vm_offset_t);
	if (offset < m->m_epg_hdrlen) {
		*len = m->m_epg_hdrlen - offset;
		return (m->m_epg_hdr + offset);
	}
	offset -= m->m_epg_hdrlen;
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (offset < pglen) {
			*len = m_epg_pages_extent(m, i, pglen) - offset;
			return ((void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff +
			    offset));
		}
		offset -= pglen;
		pgoff = 0;
	}
	KASSERT(offset <= m->m_epg_trllen, ("%s: offset beyond trailer",
	    __func__));
	*len = m->m_epg_trllen - offset;
	return (m->m_epg_trail + offset);
}

static __inline void *
m_epg_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
{
	void *base;
	size_t seglen;

	base = m_epg_segment(m, skip, &seglen);
	if (len > seglen)
		return (NULL);
	return (base);
}

void
crypto_cursor_init(struct crypto_buffer_cursor *cc,
    const struct crypto_buffer *cb)
{
	memset(cc, 0, sizeof(*cc));
	cc->cc_type = cb->cb_type;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		cc->cc_buf = cb->cb_buf;
		cc->cc_buf_len = cb->cb_buf_len;
		break;
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		cc->cc_mbuf = cb->cb_mbuf;
		break;
	case CRYPTO_BUF_VMPAGE:
		cc->cc_vmpage = cb->cb_vm_page;
		cc->cc_buf_len = cb->cb_vm_page_len;
		cc->cc_offset = cb->cb_vm_page_offset;
		break;
	case CRYPTO_BUF_UIO:
		cc->cc_iov = cb->cb_uio->uio_iov;
		cc->cc_buf_len = cb->cb_uio->uio_resid;
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cb->cb_type);
#endif
		break;
	}
}

SDT_PROBE_DEFINE2(opencrypto, criov, cursor_advance, vmpage, "struct crypto_buffer_cursor*", "size_t");

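/*
 * Advance the cursor by "amount" bytes.  The caller is responsible for
 * ensuring that at least "amount" bytes remain; the assertions below only
 * check this for the contiguous and single-mbuf cases.
 */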
void
crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount)
{
	size_t remain;

	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= amount);
		cc->cc_buf += amount;
		cc->cc_buf_len -= amount;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			if (amount < remain) {
				cc->cc_offset += amount;
				break;
			}
			amount -= remain;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (amount == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + amount);
		cc->cc_offset += amount;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			SDT_PROBE2(opencrypto, criov, cursor_advance, vmpage,
			    cc, amount);
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			if (amount < remain) {
				cc->cc_buf_len -= amount;
				cc->cc_offset += amount;
				break;
			}
			cc->cc_buf_len -= remain;
			amount -= remain;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (amount == 0 || cc->cc_buf_len == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			if (amount < remain) {
				cc->cc_offset += amount;
				break;
			}
			cc->cc_buf_len -= remain;
			amount -= remain;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (amount == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

void *
crypto_cursor_segment(struct crypto_buffer_cursor *cc, size_t *len)
{
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
	case CRYPTO_BUF_UIO:
	case CRYPTO_BUF_VMPAGE:
		if (cc->cc_buf_len == 0) {
			*len = 0;
			return (NULL);
		}
		break;
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		if (cc->cc_mbuf == NULL) {
			*len = 0;
			return (NULL);
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		*len = 0;
		return (NULL);
	}

	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		*len = cc->cc_buf_len;
		return (cc->cc_buf);
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		if (cc->cc_mbuf->m_flags & M_EXTPG)
			return (m_epg_segment(cc->cc_mbuf, cc->cc_offset, len));
		*len = cc->cc_mbuf->m_len - cc->cc_offset;
		return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
	case CRYPTO_BUF_VMPAGE:
		*len = PAGE_SIZE - cc->cc_offset;
		return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
		    *cc->cc_vmpage)) + cc->cc_offset);
	case CRYPTO_BUF_UIO:
		*len = cc->cc_iov->iov_len - cc->cc_offset;
		return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
	default:
		__assert_unreachable();
	}
}

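/*
 * Minimal usage sketch of the cursor KPI above (illustrative only; the
 * function name and the way "resid" is obtained are assumptions, not part
 * of this file): visit every physically contiguous piece of a request.
 */
#if 0
static void
example_visit_segments(const struct crypto_buffer *cb, size_t resid)
{
	struct crypto_buffer_cursor cc;
	size_t seglen;
	void *seg;

	crypto_cursor_init(&cc, cb);
	while (resid > 0) {
		seg = crypto_cursor_segment(&cc, &seglen);
		seglen = MIN(seglen, resid);
		/* ... process "seglen" bytes starting at "seg" ... */
		crypto_cursor_advance(&cc, seglen);
		resid -= seglen;
	}
}
#endif
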
void
crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
    const void *vsrc)
{
	size_t remain, todo;
	const char *src;
	char *dst;

	src = vsrc;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= size);
		memcpy(cc->cc_buf, src, size);
		cc->cc_buf += size;
		cc->cc_buf_len -= size;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			/*
			 * This uses m_copyback() for individual
			 * mbufs so that cc_mbuf and cc_offset are
			 * updated.
			 */
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			todo = MIN(remain, size);
			m_copyback(cc->cc_mbuf, cc->cc_offset, todo, src);
			src += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
		m_copyback(cc->cc_mbuf, cc->cc_offset, size, src);
		cc->cc_offset += size;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
			    *cc->cc_vmpage)) + cc->cc_offset;
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			src += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			dst = (char *)cc->cc_iov->iov_base + cc->cc_offset;
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			src += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

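/*
 * Copy "size" bytes from the buffer at the cursor position into "vdst",
 * advancing the cursor past the bytes that were read.
 */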
void
crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst)
{
	size_t remain, todo;
	const char *src;
	char *dst;

	dst = vdst;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= size);
		memcpy(dst, cc->cc_buf, size);
		cc->cc_buf += size;
		cc->cc_buf_len -= size;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			/*
			 * This uses m_copydata() for individual
			 * mbufs so that cc_mbuf and cc_offset are
			 * updated.
			 */
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			todo = MIN(remain, size);
			m_copydata(cc->cc_mbuf, cc->cc_offset, todo, dst);
			dst += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
		m_copydata(cc->cc_mbuf, cc->cc_offset, size, dst);
		cc->cc_offset += size;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
			    *cc->cc_vmpage)) + cc->cc_offset;
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			dst += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			src = (const char *)cc->cc_iov->iov_base +
			    cc->cc_offset;
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			dst += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

/*
 * To avoid advancing 'cursor', make a local copy that gets advanced
 * instead.
 */
void
crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
    void *vdst)
{
	struct crypto_buffer_cursor copy;

	copy = *cc;
	crypto_cursor_copydata(&copy, size, vdst);
}

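/*
 * Illustrative sketch (hypothetical helper; "inb", "outb" and the block
 * size are assumptions): software drivers that cannot transform data in
 * place typically pair crypto_cursor_copydata() on the input buffer with
 * crypto_cursor_copyback() on the output buffer through a small bounce
 * block.
 */
#if 0
static void
example_bounce_copy(const struct crypto_buffer *inb,
    const struct crypto_buffer *outb, int resid)
{
	struct crypto_buffer_cursor in, out;
	char block[64];
	int todo;

	crypto_cursor_init(&in, inb);
	crypto_cursor_init(&out, outb);
	while (resid > 0) {
		todo = MIN(resid, (int)sizeof(block));
		crypto_cursor_copydata(&in, todo, block);
		/* ... transform "todo" bytes in "block" ... */
		crypto_cursor_copyback(&out, todo, block);
		resid -= todo;
	}
}
#endif
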
/*
 * Apply function f to the data in an iovec list starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
static int
cuio_apply(struct uio *uio, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	struct iovec *iov = uio->uio_iov;
	int iol __diagused = uio->uio_iovcnt;
	unsigned count;
	int rval;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		iol--;
		iov++;
	}
	return (0);
}

void
crypto_copyback(struct cryptop *crp, int off, int size, const void *src)
{
	struct crypto_buffer *cb;

	if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE)
		cb = &crp->crp_obuf;
	else
		cb = &crp->crp_buf;
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		m_copyback(cb->cb_mbuf, off, size, src);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(size <= cb->cb_vm_page_len);
		MPASS(size + off <=
		    cb->cb_vm_page_len + cb->cb_vm_page_offset);
		cvm_page_copyback(cb->cb_vm_page,
		    off + cb->cb_vm_page_offset, size, src);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_UIO:
		cuio_copyback(cb->cb_uio, off, size, src);
		break;
	case CRYPTO_BUF_CONTIG:
		MPASS(off + size <= cb->cb_buf_len);
		bcopy(src, cb->cb_buf + off, size);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", cb->cb_type);
#endif
		break;
	}
}

void
crypto_copydata(struct cryptop *crp, int off, int size, void *dst)
{

	switch (crp->crp_buf.cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		m_copydata(crp->crp_buf.cb_mbuf, off, size, dst);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(size <= crp->crp_buf.cb_vm_page_len);
		MPASS(size + off <= crp->crp_buf.cb_vm_page_len +
		    crp->crp_buf.cb_vm_page_offset);
		cvm_page_copydata(crp->crp_buf.cb_vm_page,
		    off + crp->crp_buf.cb_vm_page_offset, size, dst);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_UIO:
		cuio_copydata(crp->crp_buf.cb_uio, off, size, dst);
		break;
	case CRYPTO_BUF_CONTIG:
		MPASS(off + size <= crp->crp_buf.cb_buf_len);
		bcopy(crp->crp_buf.cb_buf + off, dst, size);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", crp->crp_buf.cb_type);
#endif
		break;
	}
}

int
crypto_apply_buf(struct crypto_buffer *cb, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	int error;

	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		error = m_apply(cb->cb_mbuf, off, len,
		    (int (*)(void *, void *, u_int))f, arg);
		break;
	case CRYPTO_BUF_UIO:
		error = cuio_apply(cb->cb_uio, off, len, f, arg);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		error = cvm_page_apply(cb->cb_vm_page,
		    off + cb->cb_vm_page_offset, len, f, arg);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_CONTIG:
		MPASS(off + len <= cb->cb_buf_len);
		error = (*f)(arg, cb->cb_buf + off, len);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crypto buf type %d", cb->cb_type);
#endif
		error = 0;
		break;
	}
	return (error);
}

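/*
 * Illustrative sketch of a crypto_apply() callback (hypothetical names;
 * "example_hash_ctx" and "example_hash_update" are not real KPIs): each
 * contiguous chunk of the request is passed to a function of this shape,
 * and a non-zero return value aborts the walk.
 */
#if 0
static int
example_hash_cb(void *arg, const void *data, u_int len)
{
	struct example_hash_ctx *ctx = arg;

	example_hash_update(ctx, data, len);
	return (0);
}
#endif
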
int
crypto_apply(struct cryptop *crp, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg));
}

static inline void *
m_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
{
	int rel_off;

	MPASS(skip <= INT_MAX);

	m = m_getptr(m, (int)skip, &rel_off);
	if (m == NULL)
		return (NULL);

	MPASS(rel_off >= 0);
	skip = rel_off;
	if (skip + len > m->m_len)
		return (NULL);

	if (m->m_flags & M_EXTPG)
		return (m_epg_contiguous_subsegment(m, skip, len));
	return (mtod(m, char *) + skip);
}

static inline void *
cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len)
{
	int rel_off, idx;

	MPASS(skip <= INT_MAX);
	idx = cuio_getptr(uio, (int)skip, &rel_off);
	if (idx < 0)
		return (NULL);

	MPASS(rel_off >= 0);
	skip = rel_off;
	if (skip + len > uio->uio_iov[idx].iov_len)
		return (NULL);
	return ((char *)uio->uio_iov[idx].iov_base + skip);
}

void *
crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip,
    size_t len)
{

	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		return (m_contiguous_subsegment(cb->cb_mbuf, skip, len));
	case CRYPTO_BUF_UIO:
		return (cuio_contiguous_segment(cb->cb_uio, skip, len));
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(skip + len <= cb->cb_vm_page_len);
		return (cvm_page_contiguous_segment(cb->cb_vm_page,
		    skip + cb->cb_vm_page_offset, len));
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_CONTIG:
		MPASS(skip + len <= cb->cb_buf_len);
		return (cb->cb_buf + skip);
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", cb->cb_type);
#endif
		return (NULL);
	}
}

void *
crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len)
{
	return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len));
}
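
/*
 * Illustrative sketch (hypothetical helper; assumes the IV lives in the
 * request buffer at crp_iv_start): callers typically try for a contiguous
 * pointer first and fall back to copying when the range spans segments.
 */
#if 0
static void
example_load_iv(struct cryptop *crp, char *iv, size_t ivlen)
{
	const void *p;

	p = crypto_contiguous_subsegment(crp, crp->crp_iv_start, ivlen);
	if (p != NULL)
		memcpy(iv, p, ivlen);
	else
		crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);
}
#endif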