/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <opencrypto/cryptodev.h>

#include <machine/bus.h>

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * _busdma_dflt_lock should never get called.  It gets put into the dma tag
 * when lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 *
 * XXX Should have a way to identify which driver is responsible here.
 */
void
_busdma_dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: _busdma_dflt_lock called");
}
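
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * driver whose loads may be deferred typically passes busdma_lock_mutex
 * and the mutex protecting its state as the tag's lockfunc pair, so that
 * the deferred callback run from busdma_swi executes under the driver
 * lock.  The softc fields sc_mtx and sc_dmat are assumed names:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    DFLTPHYS, 1, DFLTPHYS, 0, busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dmat);
 */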

/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either length or the region
 * is exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
		char *addr;
		size_t ds_len;

		KASSERT((offset < list->ds_len),
		    ("Invalid mid-segment offset"));
		addr = (char *)(uintptr_t)list->ds_addr + offset;
		ds_len = list->ds_len - offset;
		offset = 0;
		if (ds_len > length)
			ds_len = length;
		length -= ds_len;
		KASSERT((ds_len != 0), ("Segment length is zero"));
		error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load an unmapped mbuf.
 */
static int
_bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error, i, off, len, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);

	len = m->m_len;
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = _bus_dmamap_load_buffer(dmat, map,
			    &m->m_epg_hdr[segoff], seglen, kernel_pmap,
			    flags, segs, nsegs);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		error = _bus_dmamap_load_phys(dmat, map,
		    m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		error = _bus_dmamap_load_buffer(dmat, map,
		    &m->m_epg_trail[off], len, kernel_pmap, flags, segs,
		    nsegs);
	}
	return (error);
}

/*
 * Load a single mbuf.
 */
static int
_bus_dmamap_load_single_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	error = 0;
	if ((m->m_flags & M_EXTPG) != 0)
		error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs,
		    flags);
	else
		error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
		    kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}
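
/*
 * Illustrative sketch (hypothetical buffers, not part of this file): a
 * caller of the vlist loader above describes its scatter/gather region
 * as an array of bus_dma_segment_t entries whose ds_addr fields carry
 * kernel virtual addresses, the same convention CAM_DATA_SG and
 * BIO_VLIST consumers use:
 *
 *	bus_dma_segment_t sg[2];
 *
 *	sg[0].ds_addr = (bus_addr_t)(uintptr_t)hdr_buf;
 *	sg[0].ds_len = hdr_len;
 *	sg[1].ds_addr = (bus_addr_t)(uintptr_t)payload_buf;
 *	sg[1].ds_len = payload_len;
 *
 * hdr_buf/payload_buf and their lengths are assumed locals.
 */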

/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			if ((m->m_flags & M_EXTPG) != 0)
				error = _bus_dmamap_load_mbuf_epg(dmat,
				    map, m, segs, nsegs, flags);
			else
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len, kernel_pmap,
				    flags | BUS_DMA_LOAD_MBUF, segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load from block io.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    int *nsegs, int flags)
{

	if ((bio->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data;
		return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n,
		    kernel_pmap, nsegs, flags, bio->bio_ma_offset,
		    bio->bio_bcount));
	}

	if ((bio->bio_flags & BIO_UNMAPPED) != 0)
		return (_bus_dmamap_load_ma(dmat, map, bio->bio_ma,
		    bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs));

	return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data,
	    bio->bio_bcount, kernel_pmap, flags, NULL, nsegs));
}

int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}
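
/*
 * Illustrative sketch (hypothetical, not part of this file):
 * bus_dmamap_load_ma_triv() is the trivial page-array loader that
 * platforms without a specialized _bus_dmamap_load_ma() implementation
 * fall back to.  A caller holding wired pages might use:
 *
 *	struct vm_page *ma[2];
 *	int nsegs = -1;
 *
 *	error = bus_dmamap_load_ma_triv(dmat, map, ma,
 *	    2 * PAGE_SIZE - off, off, BUS_DMA_NOWAIT, NULL, &nsegs);
 *
 * where off is the offset of the data within the first page and the
 * pages in ma[] are assumed to be already wired.
 */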

/*
 * Load a cam control block.
 */
static int
_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    int *nsegs, int flags)
{
	struct ccb_hdr *ccb_h;
	void *data_ptr;
	int error;
	uint32_t dxfer_len;
	uint16_t sglist_cnt;

	error = 0;
	ccb_h = &ccb->ccb_h;
	switch (ccb_h->func_code) {
	case XPT_SCSI_IO: {
		struct ccb_scsiio *csio;

		csio = &ccb->csio;
		data_ptr = csio->data_ptr;
		dxfer_len = csio->dxfer_len;
		sglist_cnt = csio->sglist_cnt;
		break;
	}
	case XPT_CONT_TARGET_IO: {
		struct ccb_scsiio *ctio;

		ctio = &ccb->ctio;
		data_ptr = ctio->data_ptr;
		dxfer_len = ctio->dxfer_len;
		sglist_cnt = ctio->sglist_cnt;
		break;
	}
	case XPT_ATA_IO: {
		struct ccb_ataio *ataio;

		ataio = &ccb->ataio;
		data_ptr = ataio->data_ptr;
		dxfer_len = ataio->dxfer_len;
		sglist_cnt = 0;
		break;
	}
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN: {
		struct ccb_nvmeio *nvmeio;

		nvmeio = &ccb->nvmeio;
		data_ptr = nvmeio->data_ptr;
		dxfer_len = nvmeio->dxfer_len;
		sglist_cnt = nvmeio->sglist_cnt;
		break;
	}
	default:
		panic("_bus_dmamap_load_ccb: Unsupported func code %d",
		    ccb_h->func_code);
	}

	switch ((ccb_h->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
		    kernel_pmap, flags, NULL, nsegs);
		break;
	case CAM_DATA_PADDR:
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
		    nsegs);
		break;
	case CAM_DATA_SG:
		error = _bus_dmamap_load_vlist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
		    nsegs, flags, 0, dxfer_len);
		break;
	case CAM_DATA_SG_PADDR:
		error = _bus_dmamap_load_plist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
		break;
	case CAM_DATA_BIO:
		error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr,
		    nsegs, flags);
		break;
	default:
		panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
		    ccb_h->flags);
	}
	return (error);
}

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */

		addr = (caddr_t) iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}
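
/*
 * Illustrative sketch (hypothetical SIM driver, not part of this file):
 * controller drivers do not call the static dispatcher above directly;
 * from their action routine they hand the whole ccb to
 * bus_dmamap_load_ccb() (defined below), which selects a loader based
 * on the CAM_DATA_MASK bits of ccb_h->flags:
 *
 *	error = bus_dmamap_load_ccb(sc->sc_dmat, cmd->map, ccb,
 *	    xxx_load_cb, cmd, 0);
 *	if (error == EINPROGRESS) {
 *		... xxx_load_cb will run once resources are available ...
 *	}
 *
 * xxx_load_cb, sc and cmd are assumed driver names.
 */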

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_vaddr(buf, buflen);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len,
		    error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

#ifdef KMSAN
	struct memdesc mem = memdesc_mbuf(m0);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}

int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

#ifdef KMSAN
	struct memdesc mem = memdesc_uio(uio);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	nsegs++;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}
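
/*
 * Illustrative sketch (hypothetical NIC driver, not part of this file):
 * the synchronous mbuf loader above is the usual transmit-path entry
 * point.  Because BUS_DMA_NOWAIT is forced, EFBIG (too many segments)
 * is normally handled by collapsing the chain and retrying:
 *
 *	bus_dma_segment_t segs[XXX_MAX_TX_SEGS];
 *	int nsegs;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->tx_tag, txb->map, m, segs,
 *	    &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		m = m_collapse(m, M_NOWAIT, XXX_MAX_TX_SEGS);
 *		if (m == NULL)
 *			return (ENOBUFS);
 *		error = bus_dmamap_load_mbuf_sg(sc->tx_tag, txb->map, m,
 *		    segs, &nsegs, BUS_DMA_NOWAIT);
 *	}
 *
 * XXX_MAX_TX_SEGS, sc and txb are assumed driver names.
 */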

int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct ccb_hdr *ccb_h;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_ccb(ccb);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	ccb_h = &ccb->ccb_h;
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		callback(callback_arg, NULL, 0, 0);
		return (0);
	}
	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_ccb(ccb);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

#ifdef KMSAN
	mem = memdesc_bio(bio);
	_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_bio(bio);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
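
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * callback suitable for the loaders above.  When a load is deferred
 * (EINPROGRESS), the same callback later runs from busdma_swi, under
 * the tag's lockfunc:
 *
 *	static void
 *	xxx_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 *	{
 *		struct xxx_cmd *cmd = arg;
 *
 *		if (error != 0) {
 *			xxx_fail_cmd(cmd, error);
 *			return;
 *		}
 *		... program nsegs descriptors from segs[] and start I/O ...
 *	}
 *
 * The xxx_* names are assumed.
 */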

int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

#ifdef KMSAN
	_bus_dmamap_load_kmsan(dmat, map, mem);
#endif

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_opaque, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_opaque, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_opaque, &nsegs, flags);
		break;
	case MEMDESC_BIO:
		error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
		    &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_CCB:
		error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs,
		    flags);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
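
/*
 * Illustrative sketch (hypothetical, not part of this file): subsystems
 * that carry several buffer shapes behind one interface wrap them in a
 * struct memdesc and let bus_dmamap_load_mem() dispatch:
 *
 *	struct memdesc mem;
 *
 *	mem = memdesc_vaddr(buf, buflen);
 *	error = bus_dmamap_load_mem(dmat, map, &mem, xxx_load_cb, cmd,
 *	    BUS_DMA_NOWAIT);
 *
 * memdesc_vaddr() is the constructor from <sys/memdesc.h> already used
 * above; xxx_load_cb and cmd are assumed driver names.
 */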

int
bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct crypto_buffer *cb, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = 0;
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf,
		    cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs);
		break;
	case CRYPTO_BUF_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		error = _bus_dmamap_load_single_mbuf(dmat, map, cb->cb_mbuf,
		    NULL, &nsegs, flags);
		break;
	case CRYPTO_BUF_UIO:
		error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs,
		    flags);
		break;
	case CRYPTO_BUF_VMPAGE:
		error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page,
		    cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL,
		    &nsegs);
		break;
	default:
		error = EINVAL;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp,
    bus_dmamap_callback_t *callback, void *callback_arg, int flags)
{
	return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback,
	    callback_arg, flags));
}

void
bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent)
{

	if (t == NULL)
		return;

	t->parent = parent;
	t->alignment = 1;
	t->boundary = 0;
	t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
	t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
	t->nsegments = BUS_SPACE_UNRESTRICTED;
	t->lockfunc = NULL;
	t->lockfuncarg = NULL;
	t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat)
{

	if (t == NULL || dmat == NULL)
		return (EINVAL);

	return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
	    t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
	    t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
	    dmat));
}

void
bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count)
{
	bus_dma_param_t *pkv;

	while (count) {
		pkv = &kv[--count];
		switch (pkv->key) {
		case BD_PARAM_PARENT:
			t->parent = pkv->ptr;
			break;
		case BD_PARAM_ALIGNMENT:
			t->alignment = pkv->num;
			break;
		case BD_PARAM_BOUNDARY:
			t->boundary = pkv->num;
			break;
		case BD_PARAM_LOWADDR:
			t->lowaddr = pkv->pa;
			break;
		case BD_PARAM_HIGHADDR:
			t->highaddr = pkv->pa;
			break;
		case BD_PARAM_MAXSIZE:
			t->maxsize = pkv->num;
			break;
		case BD_PARAM_NSEGMENTS:
			t->nsegments = pkv->num;
			break;
		case BD_PARAM_MAXSEGSIZE:
			t->maxsegsize = pkv->num;
			break;
		case BD_PARAM_FLAGS:
			t->flags = pkv->num;
			break;
		case BD_PARAM_LOCKFUNC:
			t->lockfunc = pkv->ptr;
			break;
		case BD_PARAM_LOCKFUNCARG:
			t->lockfuncarg = pkv->ptr;
			break;
		case BD_PARAM_NAME:
			t->name = pkv->ptr;
			break;
		case BD_PARAM_INVALID:
		default:
			KASSERT(0, ("Invalid key %d\n", pkv->key));
			break;
		}
	}
	return;
}
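
/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * template helpers above let a driver spell out only the parameters
 * that differ from the defaults established by bus_dma_template_init():
 *
 *	bus_dma_template_t t;
 *
 *	bus_dma_template_init(&t, bus_get_dma_tag(dev));
 *	t.maxsize = MCLBYTES;
 *	t.nsegments = 1;
 *	t.maxsegsize = MCLBYTES;
 *	error = bus_dma_template_tag(&t, &sc->sc_dmat);
 *
 * sc_dmat is an assumed softc field.
 */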

#ifndef IOMMU
bool bus_dma_iommu_set_buswide(device_t dev);
int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags);

bool
bus_dma_iommu_set_buswide(device_t dev)
{
	return (false);
}

int
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t start, vm_size_t length, int flags)
{
	return (0);
}
#endif