/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/ktr.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <opencrypto/cryptodev.h>

#include <machine/bus.h>

/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either length or the region
 * are exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
		char *addr;
		size_t ds_len;

		KASSERT((offset < list->ds_len),
		    ("Invalid mid-segment offset"));
		addr = (char *)(uintptr_t)list->ds_addr + offset;
		ds_len = list->ds_len - offset;
		offset = 0;
		if (ds_len > length)
			ds_len = length;
		length -= ds_len;
		KASSERT((ds_len != 0), ("Segment length is zero"));
		error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}
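/*
 * Illustrative sketch (not part of the file): a caller that has already
 * built a virtual scatter/gather list hands it to the loader above.
 * Segment addresses are interpreted as kernel virtual addresses, which
 * is why ds_addr is cast back to a pointer.  buf0/len0 and buf1/len1
 * are hypothetical:
 *
 *	bus_dma_segment_t sg[2];
 *	int nsegs = -1, error;
 *
 *	sg[0].ds_addr = (bus_addr_t)(uintptr_t)buf0;
 *	sg[0].ds_len = len0;
 *	sg[1].ds_addr = (bus_addr_t)(uintptr_t)buf1;
 *	sg[1].ds_len = len1;
 *	error = _bus_dmamap_load_vlist(dmat, map, sg, 2, kernel_pmap,
 *	    &nsegs, BUS_DMA_NOWAIT, 0, len0 + len1);
 *
 * The real callers are the CCB and memdesc paths later in this file.
 */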
/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load an unmapped mbuf.
 */
static int
_bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error, i, off, len, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);

	len = m->m_len;
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = _bus_dmamap_load_buffer(dmat, map,
			    &m->m_epg_hdr[segoff], seglen, kernel_pmap,
			    flags, segs, nsegs);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		error = _bus_dmamap_load_phys(dmat, map,
		    m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		error = _bus_dmamap_load_buffer(dmat, map,
		    &m->m_epg_trail[off], len, kernel_pmap, flags, segs,
		    nsegs);
	}
	return (error);
}

/*
 * Load a single mbuf.
 */
static int
_bus_dmamap_load_single_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	error = 0;
	if ((m->m_flags & M_EXTPG) != 0)
		error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs,
		    flags);
	else
		error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
		    kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			if ((m->m_flags & M_EXTPG) != 0)
				error = _bus_dmamap_load_mbuf_epg(dmat,
				    map, m, segs, nsegs, flags);
			else
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len, kernel_pmap,
				    flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}
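/*
 * Sketch of a typical consumer (hypothetical driver names): a NIC
 * transmit path loads an mbuf chain through the public wrapper
 * bus_dmamap_load_mbuf_sg() defined later in this file, retrying with
 * a collapsed chain when the tag's segment limit is exceeded:
 *
 *	bus_dma_segment_t segs[MY_TX_MAXSEGS];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->tx_tag, txb->map, m0,
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		m0 = m_collapse(m0, M_NOWAIT, MY_TX_MAXSEGS);
 *		... retry the load ...
 *	}
 *
 * MY_TX_MAXSEGS, sc and txb are placeholders for driver state.
 */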
/*
 * Load from block I/O.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    int *nsegs, int flags)
{

	if ((bio->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data;
		return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n,
		    kernel_pmap, nsegs, flags, bio->bio_ma_offset,
		    bio->bio_bcount));
	}

	if ((bio->bio_flags & BIO_UNMAPPED) != 0)
		return (_bus_dmamap_load_ma(dmat, map, bio->bio_ma,
		    bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs));

	return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data,
	    bio->bio_bcount, kernel_pmap, flags, NULL, nsegs));
}

int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}

/*
 * Load a CAM control block.
 */
static int
_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    int *nsegs, int flags)
{
	struct ccb_hdr *ccb_h;
	void *data_ptr;
	int error;
	uint32_t dxfer_len;
	uint16_t sglist_cnt;

	error = 0;
	ccb_h = &ccb->ccb_h;
	switch (ccb_h->func_code) {
	case XPT_SCSI_IO: {
		struct ccb_scsiio *csio;

		csio = &ccb->csio;
		data_ptr = csio->data_ptr;
		dxfer_len = csio->dxfer_len;
		sglist_cnt = csio->sglist_cnt;
		break;
	}
	case XPT_CONT_TARGET_IO: {
		struct ccb_scsiio *ctio;

		ctio = &ccb->ctio;
		data_ptr = ctio->data_ptr;
		dxfer_len = ctio->dxfer_len;
		sglist_cnt = ctio->sglist_cnt;
		break;
	}
	case XPT_ATA_IO: {
		struct ccb_ataio *ataio;

		ataio = &ccb->ataio;
		data_ptr = ataio->data_ptr;
		dxfer_len = ataio->dxfer_len;
		sglist_cnt = 0;
		break;
	}
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN: {
		struct ccb_nvmeio *nvmeio;

		nvmeio = &ccb->nvmeio;
		data_ptr = nvmeio->data_ptr;
		dxfer_len = nvmeio->dxfer_len;
		sglist_cnt = nvmeio->sglist_cnt;
		break;
	}
	default:
		panic("_bus_dmamap_load_ccb: Unsupported func code %d",
		    ccb_h->func_code);
	}

	switch ((ccb_h->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
		    kernel_pmap, flags, NULL, nsegs);
		break;
	case CAM_DATA_PADDR:
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
		    nsegs);
		break;
	case CAM_DATA_SG:
		error = _bus_dmamap_load_vlist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
		    nsegs, flags, 0, dxfer_len);
		break;
	case CAM_DATA_SG_PADDR:
		error = _bus_dmamap_load_plist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
		break;
	case CAM_DATA_BIO:
		error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr,
		    nsegs, flags);
		break;
	default:
		panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
		    ccb_h->flags);
	}
	return (error);
}
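/*
 * Sketch: a CAM SIM normally reaches the loader above through the
 * public bus_dmamap_load_ccb() wrapper below, typically from its
 * action routine (hypothetical softc and completion handler):
 *
 *	error = bus_dmamap_load_ccb(sc->buffer_dmat, req->dmamap, ccb,
 *	    my_sim_load_done, req, 0);
 *	if (error == EINPROGRESS)
 *		xpt_freeze_simq(sc->sim, 1);
 *
 * EINPROGRESS means the load was deferred and my_sim_load_done() will
 * fire once bounce resources become available.  my_sim_load_done, sc
 * and req are placeholders.
 */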
/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */

		addr = (caddr_t) iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_vaddr(buf, buflen);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
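/*
 * Minimal callback sketch for bus_dmamap_load() (hypothetical driver
 * code): on success the callback receives the completed segment list;
 * on failure it is invoked with nseg == 0 and the error:
 *
 *	static void
 *	my_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct my_cmd *cmd = arg;
 *
 *		if (error != 0) {
 *			my_cmd_fail(cmd, error);
 *			return;
 *		}
 *		... program nseg hardware S/G entries from segs[] ...
 *	}
 *
 * struct my_cmd and my_cmd_fail() are placeholders.
 */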
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}

int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

#ifdef KMSAN
	struct memdesc mem = memdesc_uio(uio);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	nsegs++;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct ccb_hdr *ccb_h;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_ccb(ccb);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	ccb_h = &ccb->ccb_h;
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		callback(callback_arg, NULL, 0, 0);
		return (0);
	}
	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_ccb(ccb);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
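/*
 * Note on the two-argument callback flavor used by the mbuf and uio
 * wrappers above: bus_dmamap_callback2_t additionally receives the
 * total mapped size (m0->m_pkthdr.len or uio->uio_resid), e.g. a
 * hypothetical handler:
 *
 *	static void
 *	my_load_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    bus_size_t mapsize, int error)
 *	{
 *		...
 *	}
 */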
int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_bio(bio);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_bio(bio);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

#ifdef KMSAN
	_bus_dmamap_load_kmsan(dmat, map, mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_opaque, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_opaque, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_opaque, &nsegs, flags);
		break;
	case MEMDESC_BIO:
		error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
		    &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_CCB:
		error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs,
		    flags);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
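/*
 * Sketch: bus_dmamap_load_mem() lets a consumer capture any supported
 * buffer type in a struct memdesc up front and defer the choice of
 * loader, e.g. (reusing the hypothetical my_load_cb/cmd from the
 * sketch above):
 *
 *	struct memdesc mem;
 *
 *	mem = memdesc_uio(uio);
 *	error = bus_dmamap_load_mem(dmat, map, &mem, my_load_cb, cmd,
 *	    BUS_DMA_NOWAIT);
 */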
int
bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct crypto_buffer *cb, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = 0;
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf,
		    cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case CRYPTO_BUF_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = _bus_dmamap_load_single_mbuf(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_UIO:
		error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs,
		    flags);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page,
		    cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL,
		    &nsegs);
		break;
	default:
		error = EINVAL;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback,
	    callback_arg, flags));
}

void
bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent)
{

	if (t == NULL)
		return;

	t->parent = parent;
	t->alignment = 1;
	t->boundary = 0;
	t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
	t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
	t->nsegments = BUS_SPACE_UNRESTRICTED;
	t->lockfunc = NULL;
	t->lockfuncarg = NULL;
	t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat)
{

	if (t == NULL || dmat == NULL)
		return (EINVAL);

	return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
	    t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
	    t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
	    dmat));
}
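/*
 * Template workflow sketch (hypothetical values): initialize with
 * defaults, override the fields that matter, then create the tag:
 *
 *	bus_dma_template_t t;
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	t.maxsize = MY_RING_SIZE;
 *	t.nsegments = 1;
 *	error = bus_dma_template_tag(&t, &sc->ring_dmat);
 *
 * MY_RING_SIZE and sc are placeholders.  bus_dma_template_fill()
 * below achieves the same through BD_PARAM_* key/value pairs.
 */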
void
bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count)
{
	bus_dma_param_t *pkv;

	while (count) {
		pkv = &kv[--count];
		switch (pkv->key) {
		case BD_PARAM_PARENT:
			t->parent = pkv->ptr;
			break;
		case BD_PARAM_ALIGNMENT:
			t->alignment = pkv->num;
			break;
		case BD_PARAM_BOUNDARY:
			t->boundary = pkv->num;
			break;
		case BD_PARAM_LOWADDR:
			t->lowaddr = pkv->pa;
			break;
		case BD_PARAM_HIGHADDR:
			t->highaddr = pkv->pa;
			break;
		case BD_PARAM_MAXSIZE:
			t->maxsize = pkv->num;
			break;
		case BD_PARAM_NSEGMENTS:
			t->nsegments = pkv->num;
			break;
		case BD_PARAM_MAXSEGSIZE:
			t->maxsegsize = pkv->num;
			break;
		case BD_PARAM_FLAGS:
			t->flags = pkv->num;
			break;
		case BD_PARAM_LOCKFUNC:
			t->lockfunc = pkv->ptr;
			break;
		case BD_PARAM_LOCKFUNCARG:
			t->lockfuncarg = pkv->ptr;
			break;
		case BD_PARAM_NAME:
			t->name = pkv->ptr;
			break;
		case BD_PARAM_INVALID:
		default:
			KASSERT(0, ("Invalid key %d\n", pkv->key));
			break;
		}
	}
}

#ifndef IOMMU
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bool
bus_dma_iommu_set_buswide(device_t dev)
{
	return (false);
}

int
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags)
{
	return (0);
}
#endif