/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <opencrypto/cryptodev.h>

#include <machine/bus.h>

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
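/*
 * Usage sketch (hypothetical driver, illustrative only; `sc' is an
 * assumed softc): a driver whose map loads may be deferred passes
 * busdma_lock_mutex together with its own mutex as the
 * lockfunc/lockfuncarg pair at tag creation time, so the deferred
 * callback runs with the driver lock held:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    PAGE_SIZE, 1, PAGE_SIZE, 0, busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dmat);
 */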
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 *
 * XXX Should have a way to identify which driver is responsible here.
 */
void
_busdma_dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: _busdma_dflt_lock called");
}

/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either length or the region
 * are exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
		char *addr;
		size_t ds_len;

		KASSERT((offset < list->ds_len),
		    ("Invalid mid-segment offset"));
		addr = (char *)(uintptr_t)list->ds_addr + offset;
		ds_len = list->ds_len - offset;
		offset = 0;
		if (ds_len > length)
			ds_len = length;
		length -= ds_len;
		KASSERT((ds_len != 0), ("Segment length is zero"));
		error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load an unmapped mbuf.
 */
static int
_bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error, i, off, len, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);

	len = m->m_len;
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = _bus_dmamap_load_buffer(dmat, map,
			    &m->m_epg_hdr[segoff], seglen, kernel_pmap,
			    flags, segs, nsegs);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		error = _bus_dmamap_load_phys(dmat, map,
		    m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		error = _bus_dmamap_load_buffer(dmat, map,
		    &m->m_epg_trail[off], len, kernel_pmap, flags, segs,
		    nsegs);
	}
	return (error);
}
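/*
 * Illustrative example (made-up sizes): an M_EXTPG mbuf carrying a
 * 13-byte header, two full backing pages, and a 16-byte trailer is
 * loaded above as up to four runs: one _bus_dmamap_load_buffer() call
 * for the header, one _bus_dmamap_load_phys() call per backing page,
 * and a final _bus_dmamap_load_buffer() call for the trailer.  Any
 * data trimmed from the front (the mtod() offset) is skipped first.
 */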
/*
 * Load a single mbuf.
 */
static int
_bus_dmamap_load_single_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	error = 0;
	if ((m->m_flags & M_EXTPG) != 0)
		error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs,
		    flags);
	else
		error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
		    kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			if ((m->m_flags & M_EXTPG) != 0)
				error = _bus_dmamap_load_mbuf_epg(dmat,
				    map, m, segs, nsegs, flags);
			else
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len, kernel_pmap,
				    flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load from block io.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    int *nsegs, int flags)
{

	if ((bio->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data;
		return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n,
		    kernel_pmap, nsegs, flags, bio->bio_ma_offset,
		    bio->bio_bcount));
	}

	if ((bio->bio_flags & BIO_UNMAPPED) != 0)
		return (_bus_dmamap_load_ma(dmat, map, bio->bio_ma,
		    bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs));

	return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data,
	    bio->bio_bcount, kernel_pmap, flags, NULL, nsegs));
}

/*
 * Trivial page-array loader: walk the vm_page_t array one page at a
 * time and hand each physical range to _bus_dmamap_load_phys().
 */
int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */

		addr = (caddr_t) iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}
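/*
 * The static helpers above do the type-specific walking; the public
 * bus_dmamap_load*() entry points below add the common bookkeeping:
 * arming deferral via _bus_dmamap_waitok() when BUS_DMA_NOWAIT is not
 * set, finishing the load via _bus_dmamap_complete(), and invoking the
 * caller's callback.
 */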
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_vaddr(buf, buflen);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
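/*
 * Usage sketch (hypothetical `foo' driver, illustrative only): the
 * common synchronous pattern passes BUS_DMA_NOWAIT and records the bus
 * address from the callback:
 *
 *	static void
 *	foo_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		bus_addr_t *busaddrp = arg;
 *
 *		if (error == 0 && nseg == 1)
 *			*busaddrp = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_buf,
 *	    sc->sc_buflen, foo_dmamap_cb, &sc->sc_busaddr,
 *	    BUS_DMA_NOWAIT);
 */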
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len,
		    error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}

int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

#ifdef KMSAN
	struct memdesc mem = memdesc_uio(uio);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	nsegs++;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	struct ccb_hdr *ccb_h;
	struct memdesc mem;

	ccb_h = &ccb->ccb_h;
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		callback(callback_arg, NULL, 0, 0);
		return (0);
	}

	mem = memdesc_ccb(ccb);
	return (bus_dmamap_load_mem(dmat, map, &mem, callback, callback_arg,
	    flags));
}

int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_bio(bio);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_bio(bio);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

#ifdef KMSAN
	_bus_dmamap_load_kmsan(dmat, map, mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_len, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_nseg, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_nseg, &nsegs, flags);
		break;
	case MEMDESC_BIO:
		error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
		    &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
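/*
 * Usage sketch (illustrative): callers that already carry a struct
 * memdesc can route every supported buffer type through this single
 * entry point, e.g. for a plain kernel virtual buffer (reusing the
 * hypothetical foo_dmamap_cb from the earlier sketch):
 *
 *	struct memdesc mem;
 *
 *	mem = memdesc_vaddr(buf, buflen);
 *	error = bus_dmamap_load_mem(sc->sc_dmat, sc->sc_map, &mem,
 *	    foo_dmamap_cb, &sc->sc_busaddr, BUS_DMA_NOWAIT);
 */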
int
bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct crypto_buffer *cb, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = 0;
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf,
		    cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case CRYPTO_BUF_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = _bus_dmamap_load_single_mbuf(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_UIO:
		error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs,
		    flags);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page,
		    cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL,
		    &nsegs);
		break;
	default:
		error = EINVAL;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback,
	    callback_arg, flags));
}

void
bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent)
{

	if (t == NULL)
		return;

	t->parent = parent;
	t->alignment = 1;
	t->boundary = 0;
	t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
	t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
	t->nsegments = BUS_SPACE_UNRESTRICTED;
	t->lockfunc = NULL;
	t->lockfuncarg = NULL;
	t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat)
{

	if (t == NULL || dmat == NULL)
		return (EINVAL);

	return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
	    t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
	    t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
	    dmat));
}

void
bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count)
{
	bus_dma_param_t *pkv;

	while (count) {
		pkv = &kv[--count];
		switch (pkv->key) {
		case BD_PARAM_PARENT:
			t->parent = pkv->ptr;
			break;
		case BD_PARAM_ALIGNMENT:
			t->alignment = pkv->num;
			break;
		case BD_PARAM_BOUNDARY:
			t->boundary = pkv->num;
			break;
		case BD_PARAM_LOWADDR:
			t->lowaddr = pkv->pa;
			break;
		case BD_PARAM_HIGHADDR:
			t->highaddr = pkv->pa;
			break;
		case BD_PARAM_MAXSIZE:
			t->maxsize = pkv->num;
			break;
		case BD_PARAM_NSEGMENTS:
			t->nsegments = pkv->num;
			break;
		case BD_PARAM_MAXSEGSIZE:
			t->maxsegsize = pkv->num;
			break;
		case BD_PARAM_FLAGS:
			t->flags = pkv->num;
			break;
		case BD_PARAM_LOCKFUNC:
			t->lockfunc = pkv->ptr;
			break;
		case BD_PARAM_LOCKFUNCARG:
			t->lockfuncarg = pkv->ptr;
			break;
		case BD_PARAM_NAME:
			t->name = pkv->ptr;
			break;
		case BD_PARAM_INVALID:
		default:
			KASSERT(0, ("Invalid key %d\n", pkv->key));
			break;
		}
	}
	return;
}
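/*
 * Usage sketch (illustrative values; assumes the bus_dma_param_t
 * layout from <sys/bus_dma.h> with its anonymous value union): start
 * from the permissive defaults, override selected parameters with a
 * key/value list, and cut a tag from the result:
 *
 *	bus_dma_template_t t;
 *	bus_dma_param_t kv[] = {
 *		{ .key = BD_PARAM_MAXSIZE, .num = MCLBYTES },
 *		{ .key = BD_PARAM_NSEGMENTS, .num = 1 },
 *	};
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	bus_dma_template_fill(&t, kv, nitems(kv));
 *	error = bus_dma_template_tag(&t, &sc->sc_dmat);
 */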
#ifndef IOMMU
/*
 * No-op stand-ins used when the kernel is built without "options
 * IOMMU", so that callers do not need their own #ifdef IOMMU blocks.
 */
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bool
bus_dma_iommu_set_buswide(device_t dev)
{
	return (false);
}

int
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags)
{
	return (0);
}
#endif