/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/ktr.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <opencrypto/cryptodev.h>

#include <machine/bus.h>

/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either the length or the
 * region is exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
		char *addr;
		size_t ds_len;

		KASSERT((offset < list->ds_len),
		    ("Invalid mid-segment offset"));
		addr = (char *)(uintptr_t)list->ds_addr + offset;
		ds_len = list->ds_len - offset;
		offset = 0;
		if (ds_len > length)
			ds_len = length;
		length -= ds_len;
		KASSERT((ds_len != 0), ("Segment length is zero"));
		error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}
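
/*
 * A worked example of the offset/length windowing above: given the
 * list { {addr A, len 0x1000}, {addr B, len 0x1000} } with offset
 * 0xc00 and length 0x800, the loop loads 0x400 bytes starting at
 * A + 0xc00 and then 0x400 bytes starting at B.  The offset is
 * charged against the first segment only and is zeroed before the
 * next iteration.
 */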

/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load an unmapped mbuf.
 */
static int
_bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error, i, off, len, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);

	len = m->m_len;
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = _bus_dmamap_load_buffer(dmat, map,
			    &m->m_epg_hdr[segoff], seglen, kernel_pmap,
			    flags, segs, nsegs);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		error = _bus_dmamap_load_phys(dmat, map,
		    m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		error = _bus_dmamap_load_buffer(dmat, map,
		    &m->m_epg_trail[off], len, kernel_pmap, flags, segs,
		    nsegs);
	}
	return (error);
}
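
/*
 * Layout consumed by the M_EXTPG path above: an unmapped mbuf carries
 * its data in up to three pieces, an optional header (m_epg_hdr[],
 * m_epg_hdrlen bytes), an array of physical pages (m_epg_pa[], the
 * first beginning at offset m_epg_1st_off), and an optional trailer
 * (m_epg_trail[], m_epg_trllen bytes).  The leading offset is consumed
 * from the header and page array first; whatever remains of the
 * requested length is then loaded from the trailer.
 */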

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			if ((m->m_flags & M_EXTPG) != 0)
				error = _bus_dmamap_load_mbuf_epg(dmat,
				    map, m, segs, nsegs, flags);
			else
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len, kernel_pmap,
				    flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load from block I/O.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    int *nsegs, int flags)
{

	if ((bio->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data;
		return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n,
		    kernel_pmap, nsegs, flags, bio->bio_ma_offset,
		    bio->bio_bcount));
	}

	if ((bio->bio_flags & BIO_UNMAPPED) != 0)
		return (_bus_dmamap_load_ma(dmat, map, bio->bio_ma,
		    bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs));

	return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data,
	    bio->bio_bcount, kernel_pmap, flags, NULL, nsegs));
}

int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}

/*
 * Load a CAM control block.
 */
static int
_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    int *nsegs, int flags)
{
	struct ccb_hdr *ccb_h;
	void *data_ptr;
	int error;
	uint32_t dxfer_len;
	uint16_t sglist_cnt;

	error = 0;
	ccb_h = &ccb->ccb_h;
	switch (ccb_h->func_code) {
	case XPT_SCSI_IO: {
		struct ccb_scsiio *csio;

		csio = &ccb->csio;
		data_ptr = csio->data_ptr;
		dxfer_len = csio->dxfer_len;
		sglist_cnt = csio->sglist_cnt;
		break;
	}
	case XPT_CONT_TARGET_IO: {
		struct ccb_scsiio *ctio;

		ctio = &ccb->ctio;
		data_ptr = ctio->data_ptr;
		dxfer_len = ctio->dxfer_len;
		sglist_cnt = ctio->sglist_cnt;
		break;
	}
	case XPT_ATA_IO: {
		struct ccb_ataio *ataio;

		ataio = &ccb->ataio;
		data_ptr = ataio->data_ptr;
		dxfer_len = ataio->dxfer_len;
		sglist_cnt = 0;
		break;
	}
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN: {
		struct ccb_nvmeio *nvmeio;

		nvmeio = &ccb->nvmeio;
		data_ptr = nvmeio->data_ptr;
		dxfer_len = nvmeio->dxfer_len;
		sglist_cnt = nvmeio->sglist_cnt;
		break;
	}
	default:
		panic("_bus_dmamap_load_ccb: Unsupported func code %d",
		    ccb_h->func_code);
	}

	switch ((ccb_h->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
		    kernel_pmap, flags, NULL, nsegs);
		break;
	case CAM_DATA_PADDR:
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
		    nsegs);
		break;
	case CAM_DATA_SG:
		error = _bus_dmamap_load_vlist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
		    nsegs, flags, 0, dxfer_len);
		break;
	case CAM_DATA_SG_PADDR:
		error = _bus_dmamap_load_plist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
		break;
	case CAM_DATA_BIO:
		error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr,
		    nsegs, flags);
		break;
	default:
		panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
		    ccb_h->flags);
	}
	return (error);
}
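
/*
 * Summary of the CAM_DATA_MASK dispatch above: CAM_DATA_VADDR is a
 * single kernel-virtual buffer, CAM_DATA_PADDR a single physical
 * range, CAM_DATA_SG a virtual scatter/gather list, CAM_DATA_SG_PADDR
 * a physical scatter/gather list, and CAM_DATA_BIO a struct bio that
 * is handed on to _bus_dmamap_load_bio().
 */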

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */

		addr = (caddr_t) iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
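
/*
 * A minimal driver-side sketch of the load protocol implemented above.
 * The names "my_softc" and "my_load_cb" and the stashed address are
 * hypothetical; the busdma calls and the callback signature are the
 * real API:
 *
 *	static void
 *	my_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->sg_paddr = segs[0].ds_addr;	(program nseg segments)
 *	}
 *
 *	error = bus_dmamap_load(sc->dmat, sc->map, buf, buflen,
 *	    my_load_cb, sc, BUS_DMA_NOWAIT);
 *
 * With BUS_DMA_NOWAIT the callback runs before bus_dmamap_load()
 * returns.  Without it, a bounce-resource shortage may defer the load:
 * bus_dmamap_load() returns EINPROGRESS and the callback fires later,
 * once resources become available.
 */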

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}
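
/*
 * Typical transmit-path use of bus_dmamap_load_mbuf_sg() (a sketch;
 * "sc" and MY_MAX_SEGS are hypothetical):
 *
 *	bus_dma_segment_t segs[MY_MAX_SEGS];
 *	int error, nsegs;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->dmat, sc->map, m, segs,
 *	    &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		struct mbuf *n = m_collapse(m, M_NOWAIT, MY_MAX_SEGS);
 *		if (n == NULL)
 *			return (ENOBUFS);
 *		m = n;
 *		error = bus_dmamap_load_mbuf_sg(sc->dmat, sc->map, m,
 *		    segs, &nsegs, BUS_DMA_NOWAIT);
 *	}
 *
 * The mbuf loaders never sleep (BUS_DMA_NOWAIT is forced above), so
 * EFBIG and ENOMEM must be handled synchronously by the caller.
 */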

int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	nsegs++;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct ccb_hdr *ccb_h;
	struct memdesc mem;
	int error;
	int nsegs;

	ccb_h = &ccb->ccb_h;
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		callback(callback_arg, NULL, 0, 0);
		return (0);
	}
	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_ccb(ccb);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_bio(bio);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_opaque, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_opaque, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_opaque, &nsegs, flags);
		break;
	case MEMDESC_BIO:
		error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
		    &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_CCB:
		error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs,
		    flags);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
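
/*
 * bus_dmamap_load_mem() lets callers that juggle several buffer types
 * funnel them all through one descriptor.  Sketch, reusing the
 * hypothetical my_load_cb from the bus_dmamap_load() example above:
 *
 *	struct memdesc mem;
 *
 *	mem = memdesc_vaddr(buf, buflen);
 *	error = bus_dmamap_load_mem(sc->dmat, sc->map, &mem,
 *	    my_load_cb, sc, BUS_DMA_NOWAIT);
 *
 * The companion constructors in <sys/memdesc.h> (memdesc_paddr(),
 * memdesc_bio(), memdesc_uio(), memdesc_mbuf(), memdesc_ccb(), ...)
 * build the other md_type variants handled in the switch above.
 */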

int
bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct crypto_buffer *cb, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = 0;
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf,
		    cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case CRYPTO_BUF_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_UIO:
		error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs,
		    flags);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page,
		    cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL,
		    &nsegs);
		break;
	default:
		error = EINVAL;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback,
	    callback_arg, flags));
}

void
bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent)
{

	if (t == NULL)
		return;

	t->parent = parent;
	t->alignment = 1;
	t->boundary = 0;
	t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
	t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
	t->nsegments = BUS_SPACE_UNRESTRICTED;
	t->lockfunc = NULL;
	t->lockfuncarg = NULL;
	t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat)
{

	if (t == NULL || dmat == NULL)
		return (EINVAL);

	return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
	    t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
	    t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
	    dmat));
}
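
/*
 * Template usage sketch (the key/value initializers assume the
 * anonymous union in bus_dma_param_t; see bus_dma_template_fill()
 * below for the accepted keys):
 *
 *	bus_dma_template_t t;
 *	bus_dma_tag_t dmat;
 *	bus_dma_param_t kv[] = {
 *		{ .key = BD_PARAM_MAXSIZE, .num = MCLBYTES },
 *		{ .key = BD_PARAM_NSEGMENTS, .num = 1 },
 *	};
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	bus_dma_template_fill(&t, kv, nitems(kv));
 *	error = bus_dma_template_tag(&t, &dmat);
 *
 * Parameters not listed keep the permissive defaults established by
 * bus_dma_template_init().
 */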

void
bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count)
{
	bus_dma_param_t *pkv;

	while (count) {
		pkv = &kv[--count];
		switch (pkv->key) {
		case BD_PARAM_PARENT:
			t->parent = pkv->ptr;
			break;
		case BD_PARAM_ALIGNMENT:
			t->alignment = pkv->num;
			break;
		case BD_PARAM_BOUNDARY:
			t->boundary = pkv->num;
			break;
		case BD_PARAM_LOWADDR:
			t->lowaddr = pkv->pa;
			break;
		case BD_PARAM_HIGHADDR:
			t->highaddr = pkv->pa;
			break;
		case BD_PARAM_MAXSIZE:
			t->maxsize = pkv->num;
			break;
		case BD_PARAM_NSEGMENTS:
			t->nsegments = pkv->num;
			break;
		case BD_PARAM_MAXSEGSIZE:
			t->maxsegsize = pkv->num;
			break;
		case BD_PARAM_FLAGS:
			t->flags = pkv->num;
			break;
		case BD_PARAM_LOCKFUNC:
			t->lockfunc = pkv->ptr;
			break;
		case BD_PARAM_LOCKFUNCARG:
			t->lockfuncarg = pkv->ptr;
			break;
		case BD_PARAM_NAME:
			t->name = pkv->ptr;
			break;
		case BD_PARAM_INVALID:
		default:
			KASSERT(0, ("Invalid key %d\n", pkv->key));
			break;
		}
	}
	return;
}

#ifndef IOMMU
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bool
bus_dma_iommu_set_buswide(device_t dev)
{
	return (false);
}

int
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags)
{
	return (0);
}
#endif