/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 EMC Corp.
 * All rights reserved.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

#include <machine/bus.h>

/*
 * Load up data starting at offset within a region specified by a
 * list of virtual address ranges until either the length or the
 * region is exhausted.
 */
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
    int flags, size_t offset, size_t length)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) {
		char *addr;
		size_t ds_len;

		KASSERT((offset < list->ds_len),
		    ("Invalid mid-segment offset"));
		addr = (char *)(uintptr_t)list->ds_addr + offset;
		ds_len = list->ds_len - offset;
		offset = 0;
		if (ds_len > length)
			ds_len = length;
		length -= ds_len;
		KASSERT((ds_len != 0), ("Segment length is zero"));
		error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap,
		    flags, NULL, nsegs);
		if (error)
			break;
	}
	return (error);
}

/*
 * Load a list of physical addresses.
 */
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
	int error;

	error = 0;
	for (; sglist_cnt > 0; sglist_cnt--, list++) {
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
		    nsegs);
		if (error)
			break;
	}
	return (error);
}
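/*
 * Example (illustrative sketch only; not referenced by the kernel): a
 * caller-built virtual scatter list for the vlist loader above.  The
 * buffer pointers, sizes, offset, and length are hypothetical stand-ins
 * for real driver state.
 */
static int __unused
example_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map, char *buf_a,
    char *buf_b, int *nsegs)
{
	bus_dma_segment_t vlist[2];
	int error;

	vlist[0].ds_addr = (bus_addr_t)(uintptr_t)buf_a;
	vlist[0].ds_len = 512;
	vlist[1].ds_addr = (bus_addr_t)(uintptr_t)buf_b;
	vlist[1].ds_len = 512;

	/* Skip the first 16 bytes and map at most 1000 bytes overall. */
	*nsegs = -1;
	error = _bus_dmamap_load_vlist(dmat, map, vlist, nitems(vlist),
	    kernel_pmap, nsegs, BUS_DMA_NOWAIT, 16, 1000);
	++*nsegs;	/* Convert the last segment index to a count. */
	return (error);
}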
/*
 * Load an mbuf chain.
 */
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct mbuf *m;
	int error;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, m->m_data,
			    m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF,
			    segs, nsegs);
		}
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, *nsegs);
	return (error);
}

/*
 * Load from block I/O.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    int *nsegs, int flags)
{

	if ((bio->bio_flags & BIO_VLIST) != 0) {
		bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data;
		return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n,
		    kernel_pmap, nsegs, flags, bio->bio_ma_offset,
		    bio->bio_bcount));
	}

	if ((bio->bio_flags & BIO_UNMAPPED) != 0)
		return (_bus_dmamap_load_ma(dmat, map, bio->bio_ma,
		    bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs));

	return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data,
	    bio->bio_bcount, kernel_pmap, flags, NULL, nsegs));
}

int
bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr;
	bus_size_t len;
	int error, i;

	error = 0;
	for (i = 0; tlen > 0; i++, tlen -= len) {
		len = min(PAGE_SIZE - ma_offs, tlen);
		paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs;
		error = _bus_dmamap_load_phys(dmat, map, paddr, len,
		    flags, segs, segp);
		if (error != 0)
			break;
		ma_offs = 0;
	}
	return (error);
}
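/*
 * Example (illustrative sketch only; not referenced by the kernel):
 * driving the trivial page-array loader above.  "pages" is a
 * hypothetical caller-owned array of at least two vm_page pointers.
 */
static int __unused
example_load_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **pages, int *segp)
{
	int error;

	/* Load 6000 bytes starting 192 bytes into the first page. */
	*segp = -1;
	error = bus_dmamap_load_ma_triv(dmat, map, pages, 6000, 192,
	    BUS_DMA_NOWAIT, NULL, segp);
	++*segp;	/* Convert the last segment index to a count. */
	return (error);
}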
/*
 * Load a CAM control block.
 */
static int
_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    int *nsegs, int flags)
{
	struct ccb_hdr *ccb_h;
	void *data_ptr;
	int error;
	uint32_t dxfer_len;
	uint16_t sglist_cnt;

	error = 0;
	ccb_h = &ccb->ccb_h;
	switch (ccb_h->func_code) {
	case XPT_SCSI_IO: {
		struct ccb_scsiio *csio;

		csio = &ccb->csio;
		data_ptr = csio->data_ptr;
		dxfer_len = csio->dxfer_len;
		sglist_cnt = csio->sglist_cnt;
		break;
	}
	case XPT_CONT_TARGET_IO: {
		struct ccb_scsiio *ctio;

		ctio = &ccb->ctio;
		data_ptr = ctio->data_ptr;
		dxfer_len = ctio->dxfer_len;
		sglist_cnt = ctio->sglist_cnt;
		break;
	}
	case XPT_ATA_IO: {
		struct ccb_ataio *ataio;

		ataio = &ccb->ataio;
		data_ptr = ataio->data_ptr;
		dxfer_len = ataio->dxfer_len;
		sglist_cnt = 0;
		break;
	}
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN: {
		struct ccb_nvmeio *nvmeio;

		nvmeio = &ccb->nvmeio;
		data_ptr = nvmeio->data_ptr;
		dxfer_len = nvmeio->dxfer_len;
		sglist_cnt = nvmeio->sglist_cnt;
		break;
	}
	default:
		panic("_bus_dmamap_load_ccb: Unsupported func code %d",
		    ccb_h->func_code);
	}

	switch ((ccb_h->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
		    kernel_pmap, flags, NULL, nsegs);
		break;
	case CAM_DATA_PADDR:
		error = _bus_dmamap_load_phys(dmat, map,
		    (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
		    nsegs);
		break;
	case CAM_DATA_SG:
		error = _bus_dmamap_load_vlist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
		    nsegs, flags, 0, dxfer_len);
		break;
	case CAM_DATA_SG_PADDR:
		error = _bus_dmamap_load_plist(dmat, map,
		    (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
		break;
	case CAM_DATA_BIO:
		error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr,
		    nsegs, flags);
		break;
	default:
		panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
		    ccb_h->flags);
	}
	return (error);
}

/*
 * Load a uio.
 */
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    int *nsegs, int flags)
{
	bus_size_t resid;
	bus_size_t minlen;
	struct iovec *iov;
	pmap_t pmap;
	caddr_t addr;
	int error, i;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;
	resid = uio->uio_resid;
	iov = uio->uio_iov;
	error = 0;

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */

		addr = (caddr_t) iov[i].iov_base;
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, pmap, flags, NULL, nsegs);
			resid -= minlen;
		}
	}

	return (error);
}
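/*
 * Example (illustrative sketch only; not referenced by the kernel): a
 * single-iovec kernel-space uio fed to the loader above; "buf" and
 * "len" are hypothetical.
 */
static int __unused
example_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    size_t len, int *nsegs)
{
	struct iovec iov;
	struct uio uio;
	int error;

	iov.iov_base = buf;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;	/* kernel_pmap will be used. */
	uio.uio_rw = UIO_READ;
	uio.uio_td = NULL;		/* Only needed for UIO_USERSPACE. */

	*nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, &uio, nsegs, BUS_DMA_NOWAIT);
	++*nsegs;	/* Convert the last segment index to a count. */
	return (error);
}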
/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_vaddr(buf, buflen);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
	++nsegs;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
	++*nsegs;
	_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
	return (error);
}

int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int nsegs, error;

	flags |= BUS_DMA_NOWAIT;
	nsegs = -1;
	error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
	nsegs++;

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct ccb_hdr *ccb_h;
	struct memdesc mem;
	int error;
	int nsegs;

	ccb_h = &ccb->ccb_h;
	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		callback(callback_arg, NULL, 0, 0);
		return (0);
	}
	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_ccb(ccb);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
    bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		mem = memdesc_bio(bio);
		_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}
	nsegs = -1;
	error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags);
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, error);
	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}

int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	int error;
	int nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0)
		_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

	nsegs = -1;
	error = 0;
	switch (mem->md_type) {
	case MEMDESC_VADDR:
		error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
		    mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
		break;
	case MEMDESC_PADDR:
		error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
		    mem->md_opaque, flags, NULL, &nsegs);
		break;
	case MEMDESC_VLIST:
		error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
		    mem->md_opaque, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX);
		break;
	case MEMDESC_PLIST:
		error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
		    mem->md_opaque, &nsegs, flags);
		break;
	case MEMDESC_BIO:
		error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
		    &nsegs, flags);
		break;
	case MEMDESC_UIO:
		error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
		    &nsegs, flags);
		break;
	case MEMDESC_MBUF:
		error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
		    NULL, &nsegs, flags);
		break;
	case MEMDESC_CCB:
		error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs,
		    flags);
		break;
	}
	nsegs++;

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, flags, error, nsegs);

	if (error == EINPROGRESS)
		return (error);

	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
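/*
 * Example (illustrative sketch only; not referenced by the kernel):
 * the usual driver-side pattern for the callback-based loaders above,
 * as described in bus_dma(9).  "example_softc" and its fields are
 * hypothetical, and the tag is assumed to allow a single segment.
 */
struct example_softc {
	bus_addr_t	ex_busaddr;
	int		ex_error;
};

static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct example_softc *sc;

	sc = arg;
	sc->ex_error = error;
	if (error == 0)
		sc->ex_busaddr = segs[0].ds_addr;
}

static int __unused
example_load(bus_dma_tag_t dmat, bus_dmamap_t map, struct example_softc *sc,
    void *buf, bus_size_t buflen)
{
	int error;

	error = bus_dmamap_load(dmat, map, buf, buflen, example_load_cb,
	    sc, BUS_DMA_NOWAIT);
	/*
	 * With BUS_DMA_NOWAIT the callback has run by the time
	 * bus_dmamap_load() returns; without it, EINPROGRESS would mean
	 * the load was deferred and the callback will fire later.
	 */
	if (error == 0)
		error = sc->ex_error;
	return (error);
}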