/*	$OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/

/*-
 * Copyright (c) 1999 Theo de Raadt
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/sdt.h>

#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <opencrypto/cryptodev.h>

SDT_PROVIDER_DECLARE(opencrypto);

/*
 * These macros are only for avoiding code duplication, as we need to skip
 * a given number of bytes in the same way in several functions below.
 */
#define	CUIO_SKIP()	do {					\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));	\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));	\
	while (off > 0) {					\
		KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \
		if (off < iov->iov_len)				\
			break;					\
		off -= iov->iov_len;				\
		iol--;						\
		iov++;						\
	}							\
} while (0)

#define	CVM_PAGE_SKIP()	do {					\
	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));	\
	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));	\
	while (off > 0) {					\
		if (off < PAGE_SIZE)				\
			break;					\
		processed += PAGE_SIZE - off;			\
		off -= PAGE_SIZE - off;				\
		pages++;					\
	}							\
} while (0)

static void
cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
{
	struct iovec *iov = uio->uio_iov;
	int iol = uio->uio_iovcnt;
	unsigned count;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		bcopy(((caddr_t)iov->iov_base) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		iol--;
		iov++;
	}
}

static void
cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp)
{
	struct iovec *iov = uio->uio_iov;
	int iol = uio->uio_iovcnt;
	unsigned count;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		bcopy(cp, ((caddr_t)iov->iov_base) + off, count);
		len -= count;
		cp += count;
		off = 0;
		iol--;
		iov++;
	}
}

/*
 * Return the index and offset of the given location in the iovec list.
 */
static int
cuio_getptr(struct uio *uio, int loc, int *off)
{
	int ind, len;

	ind = 0;
	while (loc >= 0 && ind < uio->uio_iovcnt) {
		len = uio->uio_iov[ind].iov_len;
		if (len > loc) {
			*off = loc;
			return (ind);
		}
		loc -= len;
		ind++;
	}

	if (ind > 0 && loc == 0) {
		ind--;
		*off = uio->uio_iov[ind].iov_len;
		return (ind);
	}

	return (-1);
}
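
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how cuio_getptr() maps a linear byte offset onto an iovec index plus an
 * intra-iovec offset.  The two-element buffer layout and the CRIOV_EXAMPLES
 * guard are hypothetical and exist only for illustration.
 */
#ifdef CRIOV_EXAMPLES
static int
cuio_getptr_example(void)
{
	char a[16], b[32];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct uio uio = {
		.uio_iov = iov,
		.uio_iovcnt = 2,
	};
	int off;

	/* Offset 20 lands 4 bytes into the second iovec: returns 1, off == 4. */
	return (cuio_getptr(&uio, 20, &off));
}
#endif /* CRIOV_EXAMPLES */
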
#if CRYPTO_MAY_HAVE_VMPAGE
/*
 * Apply function f to the data in a vm_page_t list starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
static int
cvm_page_apply(vm_page_t *pages, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	int processed = 0;
	unsigned count;
	int rval;

	CVM_PAGE_SKIP();
	while (len > 0) {
		char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages));
		count = min(PAGE_SIZE - off, len);
		rval = (*f)(arg, kaddr + off, count);
		if (rval)
			return (rval);
		len -= count;
		processed += count;
		off = 0;
		pages++;
	}
	return (0);
}

static inline void *
cvm_page_contiguous_segment(vm_page_t *pages, size_t skip, int len)
{
	if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE)
		return (NULL);

	pages += (skip / PAGE_SIZE);
	skip -= rounddown(skip, PAGE_SIZE);
	return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip);
}

/*
 * Copy len bytes of data from the pointer cp into the vm_page_t array,
 * skipping the first off bytes.  Return the number of bytes skipped and
 * copied.  Does not verify the length of the array.
 */
static int
cvm_page_copyback(vm_page_t *pages, int off, int len, c_caddr_t cp)
{
	int processed = 0;
	unsigned count;

	CVM_PAGE_SKIP();
	while (len > 0) {
		count = min(PAGE_SIZE - off, len);
		bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off,
		    count);
		len -= count;
		cp += count;
		processed += count;
		off = 0;
		pages++;
	}
	return (processed);
}

/*
 * Copy len bytes of data from the vm_page_t array, skipping the first off
 * bytes, into the pointer cp.  Return the number of bytes skipped and
 * copied.  Does not verify the length of the array.
 */
static int
cvm_page_copydata(vm_page_t *pages, int off, int len, caddr_t cp)
{
	int processed = 0;
	unsigned count;

	CVM_PAGE_SKIP();
	while (len > 0) {
		count = min(PAGE_SIZE - off, len);
		bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off),
		    cp, count);
		len -= count;
		cp += count;
		processed += count;
		off = 0;
		pages++;
	}
	return (processed);
}
#endif /* CRYPTO_MAY_HAVE_VMPAGE */

/*
 * Given a starting page in an m_epg, determine the length of the
 * current physically contiguous segment.
 */
static __inline size_t
m_epg_pages_extent(struct mbuf *m, int idx, u_int pglen)
{
	size_t len;
	u_int i;

	len = pglen;
	for (i = idx + 1; i < m->m_epg_npgs; i++) {
		if (m->m_epg_pa[i - 1] + PAGE_SIZE != m->m_epg_pa[i])
			break;
		len += m_epg_pagelen(m, i, 0);
	}
	return (len);
}

static void *
m_epg_segment(struct mbuf *m, size_t offset, size_t *len)
{
	u_int i, pglen, pgoff;

	offset += mtod(m, vm_offset_t);
	if (offset < m->m_epg_hdrlen) {
		*len = m->m_epg_hdrlen - offset;
		return (m->m_epg_hdr + offset);
	}
	offset -= m->m_epg_hdrlen;
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (offset < pglen) {
			*len = m_epg_pages_extent(m, i, pglen) - offset;
			return ((void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff +
			    offset));
		}
		offset -= pglen;
		pgoff = 0;
	}
	KASSERT(offset <= m->m_epg_trllen, ("%s: offset beyond trailer",
	    __func__));
	*len = m->m_epg_trllen - offset;
	return (m->m_epg_trail + offset);
}

static __inline void *
m_epg_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
{
	void *base;
	size_t seglen;

	base = m_epg_segment(m, skip, &seglen);
	if (len > seglen)
		return (NULL);
	return (base);
}

void
crypto_cursor_init(struct crypto_buffer_cursor *cc,
    const struct crypto_buffer *cb)
{
	memset(cc, 0, sizeof(*cc));
	cc->cc_type = cb->cb_type;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		cc->cc_buf = cb->cb_buf;
		cc->cc_buf_len = cb->cb_buf_len;
		break;
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		cc->cc_mbuf = cb->cb_mbuf;
		break;
	case CRYPTO_BUF_VMPAGE:
		cc->cc_vmpage = cb->cb_vm_page;
		cc->cc_buf_len = cb->cb_vm_page_len;
		cc->cc_offset = cb->cb_vm_page_offset;
		break;
	case CRYPTO_BUF_UIO:
		cc->cc_iov = cb->cb_uio->uio_iov;
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cb->cb_type);
#endif
		break;
	}
}
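
/*
 * Illustrative sketch (editor's addition): cursors are typically created
 * from a request's crp_buf and then advanced to the region of interest,
 * such as the start of the payload.  Compiled only under the hypothetical
 * CRIOV_EXAMPLES guard; crypto_cursor_advance() is declared in
 * opencrypto/cryptodev.h and defined below.
 */
#ifdef CRIOV_EXAMPLES
static void
crypto_cursor_payload_example(struct cryptop *crp,
    struct crypto_buffer_cursor *cc)
{
	crypto_cursor_init(cc, &crp->crp_buf);
	crypto_cursor_advance(cc, crp->crp_payload_start);
}
#endif /* CRIOV_EXAMPLES */
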
SDT_PROBE_DEFINE2(opencrypto, criov, cursor_advance, vmpage,
    "struct crypto_buffer_cursor*", "size_t");

void
crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount)
{
	size_t remain;

	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= amount);
		cc->cc_buf += amount;
		cc->cc_buf_len -= amount;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			if (amount < remain) {
				cc->cc_offset += amount;
				break;
			}
			amount -= remain;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (amount == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + amount);
		cc->cc_offset += amount;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			SDT_PROBE2(opencrypto, criov, cursor_advance, vmpage,
			    cc, amount);
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			if (amount < remain) {
				cc->cc_buf_len -= amount;
				cc->cc_offset += amount;
				break;
			}
			cc->cc_buf_len -= remain;
			amount -= remain;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (amount == 0 || cc->cc_buf_len == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			if (amount < remain) {
				cc->cc_offset += amount;
				break;
			}
			amount -= remain;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (amount == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

void *
crypto_cursor_segment(struct crypto_buffer_cursor *cc, size_t *len)
{
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		*len = cc->cc_buf_len;
		return (cc->cc_buf);
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		if (cc->cc_mbuf == NULL) {
			*len = 0;
			return (NULL);
		}
		if (cc->cc_mbuf->m_flags & M_EXTPG)
			return (m_epg_segment(cc->cc_mbuf, cc->cc_offset, len));
		*len = cc->cc_mbuf->m_len - cc->cc_offset;
		return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
	case CRYPTO_BUF_VMPAGE:
		*len = PAGE_SIZE - cc->cc_offset;
		return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
		    *cc->cc_vmpage)) + cc->cc_offset);
	case CRYPTO_BUF_UIO:
		*len = cc->cc_iov->iov_len - cc->cc_offset;
		return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		*len = 0;
		return (NULL);
	}
}
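
/*
 * Illustrative sketch (editor's addition): a typical driver-style walk over
 * a crypto buffer using the cursor API above, handing one physically
 * contiguous segment at a time to a callback.  process_segment() and the
 * CRIOV_EXAMPLES guard are hypothetical.
 */
#ifdef CRIOV_EXAMPLES
static void
crypto_cursor_walk_example(const struct crypto_buffer *cb, size_t resid,
    void (*process_segment)(const void *, size_t))
{
	struct crypto_buffer_cursor cc;
	size_t seglen;
	void *seg;

	crypto_cursor_init(&cc, cb);
	while (resid > 0) {
		seg = crypto_cursor_segment(&cc, &seglen);
		seglen = MIN(seglen, resid);
		process_segment(seg, seglen);
		crypto_cursor_advance(&cc, seglen);
		resid -= seglen;
	}
}
#endif /* CRIOV_EXAMPLES */
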
void
crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
    const void *vsrc)
{
	size_t remain, todo;
	const char *src;
	char *dst;

	src = vsrc;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= size);
		memcpy(cc->cc_buf, src, size);
		cc->cc_buf += size;
		cc->cc_buf_len -= size;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			/*
			 * This uses m_copyback() for individual
			 * mbufs so that cc_mbuf and cc_offset are
			 * updated.
			 */
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			todo = MIN(remain, size);
			m_copyback(cc->cc_mbuf, cc->cc_offset, todo, src);
			src += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
		m_copyback(cc->cc_mbuf, cc->cc_offset, size, src);
		cc->cc_offset += size;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
			    *cc->cc_vmpage)) + cc->cc_offset;
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			src += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			dst = (char *)cc->cc_iov->iov_base + cc->cc_offset;
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			src += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

void
crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst)
{
	size_t remain, todo;
	const char *src;
	char *dst;

	dst = vdst;
	switch (cc->cc_type) {
	case CRYPTO_BUF_CONTIG:
		MPASS(cc->cc_buf_len >= size);
		memcpy(dst, cc->cc_buf, size);
		cc->cc_buf += size;
		cc->cc_buf_len -= size;
		break;
	case CRYPTO_BUF_MBUF:
		for (;;) {
			/*
			 * This uses m_copydata() for individual
			 * mbufs so that cc_mbuf and cc_offset are
			 * updated.
			 */
			remain = cc->cc_mbuf->m_len - cc->cc_offset;
			todo = MIN(remain, size);
			m_copydata(cc->cc_mbuf, cc->cc_offset, todo, dst);
			dst += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_mbuf = cc->cc_mbuf->m_next;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		MPASS(cc->cc_mbuf->m_len >= cc->cc_offset + size);
		m_copydata(cc->cc_mbuf, cc->cc_offset, size, dst);
		cc->cc_offset += size;
		break;
	case CRYPTO_BUF_VMPAGE:
		for (;;) {
			src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
			    *cc->cc_vmpage)) + cc->cc_offset;
			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			dst += todo;
			cc->cc_buf_len -= todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_vmpage++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	case CRYPTO_BUF_UIO:
		for (;;) {
			src = (const char *)cc->cc_iov->iov_base +
			    cc->cc_offset;
			remain = cc->cc_iov->iov_len - cc->cc_offset;
			todo = MIN(remain, size);
			memcpy(dst, src, todo);
			dst += todo;
			if (todo < remain) {
				cc->cc_offset += todo;
				break;
			}
			size -= todo;
			cc->cc_iov++;
			cc->cc_offset = 0;
			if (size == 0)
				break;
		}
		break;
	default:
#ifdef INVARIANTS
		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
		break;
	}
}

/*
 * To avoid advancing the caller's cursor, make a local copy that gets
 * advanced instead.
 */
void
crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
    void *vdst)
{
	struct crypto_buffer_cursor copy;

	copy = *cc;
	crypto_cursor_copydata(&copy, size, vdst);
}
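
/*
 * Illustrative sketch (editor's addition): gathering one cipher block into
 * a contiguous bounce buffer without moving the cursor, transforming it,
 * and scattering the result back over the same bytes (which advances the
 * cursor past them).  The block size and CRIOV_EXAMPLES guard are
 * hypothetical; the transform itself is elided.
 */
#ifdef CRIOV_EXAMPLES
static void
crypto_cursor_bounce_example(struct crypto_buffer_cursor *cc, int blklen)
{
	char block[64];

	KASSERT(blklen > 0 && blklen <= (int)sizeof(block),
	    ("%s: bad block length", __func__));
	crypto_cursor_copydata_noadv(cc, blklen, block);
	/* ... transform 'block' in place here ... */
	crypto_cursor_copyback(cc, blklen, block);
}
#endif /* CRIOV_EXAMPLES */
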
/*
 * Apply function f to the data in an iovec list starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
static int
cuio_apply(struct uio *uio, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	struct iovec *iov = uio->uio_iov;
	int iol = uio->uio_iovcnt;
	unsigned count;
	int rval;

	CUIO_SKIP();
	while (len > 0) {
		KASSERT(iol >= 0, ("%s: empty", __func__));
		count = min(iov->iov_len - off, len);
		rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		iol--;
		iov++;
	}
	return (0);
}

void
crypto_copyback(struct cryptop *crp, int off, int size, const void *src)
{
	struct crypto_buffer *cb;

	if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE)
		cb = &crp->crp_obuf;
	else
		cb = &crp->crp_buf;
	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		m_copyback(cb->cb_mbuf, off, size, src);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(size <= cb->cb_vm_page_len);
		MPASS(size + off <=
		    cb->cb_vm_page_len + cb->cb_vm_page_offset);
		cvm_page_copyback(cb->cb_vm_page,
		    off + cb->cb_vm_page_offset, size, src);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_UIO:
		cuio_copyback(cb->cb_uio, off, size, src);
		break;
	case CRYPTO_BUF_CONTIG:
		MPASS(off + size <= cb->cb_buf_len);
		bcopy(src, cb->cb_buf + off, size);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", cb->cb_type);
#endif
		break;
	}
}

void
crypto_copydata(struct cryptop *crp, int off, int size, void *dst)
{

	switch (crp->crp_buf.cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		m_copydata(crp->crp_buf.cb_mbuf, off, size, dst);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(size <= crp->crp_buf.cb_vm_page_len);
		MPASS(size + off <= crp->crp_buf.cb_vm_page_len +
		    crp->crp_buf.cb_vm_page_offset);
		cvm_page_copydata(crp->crp_buf.cb_vm_page,
		    off + crp->crp_buf.cb_vm_page_offset, size, dst);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_UIO:
		cuio_copydata(crp->crp_buf.cb_uio, off, size, dst);
		break;
	case CRYPTO_BUF_CONTIG:
		MPASS(off + size <= crp->crp_buf.cb_buf_len);
		bcopy(crp->crp_buf.cb_buf + off, dst, size);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", crp->crp_buf.cb_type);
#endif
		break;
	}
}

int
crypto_apply_buf(struct crypto_buffer *cb, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	int error;

	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		error = m_apply(cb->cb_mbuf, off, len,
		    (int (*)(void *, void *, u_int))f, arg);
		break;
	case CRYPTO_BUF_UIO:
		error = cuio_apply(cb->cb_uio, off, len, f, arg);
		break;
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		error = cvm_page_apply(cb->cb_vm_page,
		    off + cb->cb_vm_page_offset, len, f, arg);
		break;
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_CONTIG:
		MPASS(off + len <= cb->cb_buf_len);
		error = (*f)(arg, cb->cb_buf + off, len);
		break;
	default:
#ifdef INVARIANTS
		panic("invalid crypto buf type %d", cb->cb_type);
#endif
		error = 0;
		break;
	}
	return (error);
}

int
crypto_apply(struct cryptop *crp, int off, int len,
    int (*f)(void *, const void *, u_int), void *arg)
{
	return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg));
}
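
/*
 * Illustrative sketch (editor's addition): crypto_apply() invokes a callback
 * once per contiguous chunk of the requested region, so a driver can fold
 * scattered payload bytes without copying them.  The byte-sum callback and
 * the CRIOV_EXAMPLES guard are hypothetical.
 */
#ifdef CRIOV_EXAMPLES
static int
byte_sum_cb(void *arg, const void *data, u_int len)
{
	uint32_t *sum = arg;
	const uint8_t *p = data;
	u_int i;

	for (i = 0; i < len; i++)
		*sum += p[i];
	return (0);
}

static uint32_t
crypto_apply_example(struct cryptop *crp)
{
	uint32_t sum = 0;

	crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    byte_sum_cb, &sum);
	return (sum);
}
#endif /* CRIOV_EXAMPLES */
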
static inline void *
m_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len)
{
	int rel_off;

	MPASS(skip <= INT_MAX);

	m = m_getptr(m, (int)skip, &rel_off);
	if (m == NULL)
		return (NULL);

	MPASS(rel_off >= 0);
	skip = rel_off;
	if (skip + len > m->m_len)
		return (NULL);

	if (m->m_flags & M_EXTPG)
		return (m_epg_contiguous_subsegment(m, skip, len));
	return (mtod(m, char *) + skip);
}

static inline void *
cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len)
{
	int rel_off, idx;

	MPASS(skip <= INT_MAX);
	idx = cuio_getptr(uio, (int)skip, &rel_off);
	if (idx < 0)
		return (NULL);

	MPASS(rel_off >= 0);
	skip = rel_off;
	if (skip + len > uio->uio_iov[idx].iov_len)
		return (NULL);
	return ((char *)uio->uio_iov[idx].iov_base + skip);
}

void *
crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip,
    size_t len)
{

	switch (cb->cb_type) {
	case CRYPTO_BUF_MBUF:
	case CRYPTO_BUF_SINGLE_MBUF:
		return (m_contiguous_subsegment(cb->cb_mbuf, skip, len));
	case CRYPTO_BUF_UIO:
		return (cuio_contiguous_segment(cb->cb_uio, skip, len));
#if CRYPTO_MAY_HAVE_VMPAGE
	case CRYPTO_BUF_VMPAGE:
		MPASS(skip + len <= cb->cb_vm_page_len);
		return (cvm_page_contiguous_segment(cb->cb_vm_page,
		    skip + cb->cb_vm_page_offset, len));
#endif /* CRYPTO_MAY_HAVE_VMPAGE */
	case CRYPTO_BUF_CONTIG:
		MPASS(skip + len <= cb->cb_buf_len);
		return (cb->cb_buf + skip);
	default:
#ifdef INVARIANTS
		panic("invalid crp buf type %d", cb->cb_type);
#endif
		return (NULL);
	}
}

void *
crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len)
{
	return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len));
}
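
/*
 * Illustrative sketch (editor's addition): drivers often try to borrow a
 * contiguous pointer into the request and fall back to copying when the
 * region spans segments.  The IV-sized region and the CRIOV_EXAMPLES guard
 * are hypothetical.
 */
#ifdef CRIOV_EXAMPLES
static const void *
crypto_read_iv_example(struct cryptop *crp, void *iv, int ivlen)
{
	const void *p;

	p = crypto_contiguous_subsegment(crp, crp->crp_iv_start, ivlen);
	if (p != NULL)
		return (p);
	crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);
	return (iv);
}
#endif /* CRIOV_EXAMPLES */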