/*-
 * Copyright (c) 2013 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <bsd_global.h>

#if USB_HAVE_BUSDMA
static void usb_pc_common_mem_cb(struct usb_page_cache *pc,
    void *vaddr, uint32_t length);
#endif

/*------------------------------------------------------------------------*
 * usbd_get_page - lookup DMA-able memory for the given offset
 *
 * NOTE: Only call this function when the "page_cache" structure has
 * been properly initialized !
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
#if USB_HAVE_BUSDMA
        struct usb_page *page;

        if (pc->page_start) {

                /* Case 1 - something has been loaded into DMA */

                if (pc->buffer) {

                        /* Case 1a - Kernel Virtual Address */

                        res->buffer = USB_ADD_BYTES(pc->buffer, offset);
                }
                offset += pc->page_offset_buf;

                /* compute destination page */

                page = pc->page_start;

                if (pc->ismultiseg) {

                        page += (offset / USB_PAGE_SIZE);

                        offset %= USB_PAGE_SIZE;

                        res->length = USB_PAGE_SIZE - offset;
                        res->physaddr = page->physaddr + offset;
                } else {
                        res->length = (usb_size_t)-1;
                        res->physaddr = page->physaddr + offset;
                }
                if (!pc->buffer) {

                        /* Case 1b - Non Kernel Virtual Address */

                        res->buffer = USB_ADD_BYTES(page->buffer, offset);
                }
                return;
        }
#endif
        /* Case 2 - Plain PIO */

        res->buffer = USB_ADD_BYTES(pc->buffer, offset);
        res->length = (usb_size_t)-1;
#if USB_HAVE_BUSDMA
        res->physaddr = 0;
#endif
}

/*------------------------------------------------------------------------*
 * usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
        struct usb_page_search buf_res;

        while (len != 0) {

                usbd_get_page(cache, offset, &buf_res);

                if (buf_res.length > len) {
                        buf_res.length = len;
                }
                memcpy(buf_res.buffer, ptr, buf_res.length);

                offset += buf_res.length;
                len -= buf_res.length;
                ptr = USB_ADD_BYTES(ptr, buf_res.length);
        }
}

/*------------------------------------------------------------------------*
 * usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
        struct usb_page_search res;

        while (len != 0) {

                usbd_get_page(cache, offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                memcpy(ptr, res.buffer, res.length);

                offset += res.length;
                len -= res.length;
                ptr = USB_ADD_BYTES(ptr, res.length);
        }
}

/*------------------------------------------------------------------------*
 * usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
        struct usb_page_search res;

        while (len != 0) {

                usbd_get_page(cache, offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                memset(res.buffer, 0, res.length);

                offset += res.length;
                len -= res.length;
        }
}

#if USB_HAVE_BUSDMA

/*------------------------------------------------------------------------*
 * usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(struct usb_page_cache *pc,
    void *vaddr, uint32_t length)
{
        struct usb_page *pg;
        usb_size_t rem;
        bus_size_t off;
        bus_addr_t phys = (uintptr_t)vaddr;     /* XXX */
        uint32_t nseg;

        if (length == 0)
                nseg = 1;
        else
                nseg = ((length + USB_PAGE_SIZE - 1) / USB_PAGE_SIZE);

        pg = pc->page_start;
        pg->physaddr = phys & ~(USB_PAGE_SIZE - 1);
        rem = phys & (USB_PAGE_SIZE - 1);
        pc->page_offset_buf = rem;
        pc->page_offset_end += rem;
        length += rem;

        for (off = USB_PAGE_SIZE; off < length; off += USB_PAGE_SIZE) {
                pg++;
                pg->physaddr = (phys + off) & ~(USB_PAGE_SIZE - 1);
        }
}

/*------------------------------------------------------------------------*
 * usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
        void *ptr;
        uint32_t rem;

        /* allocate zeroed memory */

        if (align != 1) {
                ptr = malloc(size + align, XXX, XXX);
                if (ptr == NULL)
                        goto error;

                rem = (-((uintptr_t)ptr)) & (align - 1);
        } else {
                ptr = malloc(size, XXX, XXX);
                if (ptr == NULL)
                        goto error;
                rem = 0;
        }

        /* setup page cache */
        pc->buffer = ((uint8_t *)ptr) + rem;
        pc->page_start = pg;
        pc->page_offset_buf = 0;
        pc->page_offset_end = size;
        pc->map = NULL;
        pc->tag = ptr;
        pc->ismultiseg = (align == 1);

        /* compute physical address */
        usb_pc_common_mem_cb(pc, pc->buffer, size);

        usb_pc_cpu_flush(pc);
        return (0);

error:
        /* reset most of the page cache */
        pc->buffer = NULL;
        pc->page_start = NULL;
        pc->page_offset_buf = 0;
        pc->page_offset_end = 0;
        pc->map = NULL;
        pc->tag = NULL;
        return (1);
}

/*------------------------------------------------------------------------*
 * usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
        if (pc != NULL && pc->buffer != NULL) {
                free(pc->tag, XXX);
                pc->buffer = NULL;
        }
}

/*------------------------------------------------------------------------*
 * usb_pc_load_mem - load virtual memory into DMA
 *
 * Return values:
 *    0: Success
 * Else: Error
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
        /* setup page cache */
        pc->page_offset_buf = 0;
        pc->page_offset_end = size;
        pc->ismultiseg = 1;

        mtx_assert(pc->tag_parent->mtx, MA_OWNED);

        if (size > 0) {
                /* compute physical address */
                usb_pc_common_mem_cb(pc, pc->buffer, size);
        }
        if (sync == 0) {
                /*
                 * Call callback so that refcount is decremented
                 * properly:
                 */
                pc->tag_parent->dma_error = 0;
                (pc->tag_parent->func) (pc->tag_parent);
        }
        return (0);
}

/*------------------------------------------------------------------------*
 * usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
        if (pc->page_offset_end == pc->page_offset_buf) {
                /* nothing has been loaded into this page cache! */
                return;
        }
        /* NOP */
}

/*------------------------------------------------------------------------*
 * usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
        if (pc->page_offset_end == pc->page_offset_buf) {
                /* nothing has been loaded into this page cache! */
                return;
        }
        /* NOP */
}

/*------------------------------------------------------------------------*
 * usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
        return (0);                     /* NOP, success */
}

/*------------------------------------------------------------------------*
 * usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
        /* NOP */
}

/*------------------------------------------------------------------------*
 * usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
        memset(udpt, 0, sizeof(*udpt));

        /* sanity checking */
        if ((nudt == 0) ||
            (ndmabits == 0) ||
            (mtx == NULL)) {
                /* something is corrupt */
                return;
        }
        /* initialise condition variable */
        cv_init(udpt->cv, "USB DMA CV");

        /* store some information */
        udpt->mtx = mtx;
        udpt->func = func;
        udpt->tag = dmat;
        udpt->utag_first = udt;
        udpt->utag_max = nudt;
        udpt->dma_bits = ndmabits;

        while (nudt--) {
                memset(udt, 0, sizeof(*udt));
                udt->tag_parent = udpt;
                udt++;
        }
}

/*------------------------------------------------------------------------*
 * usb_dma_tag_unsetup - factored out code
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
        struct usb_dma_tag *udt;
        uint8_t nudt;

        udt = udpt->utag_first;
        nudt = udpt->utag_max;

        while (nudt--) {
                udt->align = 0;
                udt++;
        }

        if (udpt->utag_max) {
                /* destroy the condition variable */
                cv_destroy(udpt->cv);
        }
}

/*------------------------------------------------------------------------*
 * usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
        struct usb_xfer_root *info;
        struct usb_xfer *xfer;
        usb_frcount_t nframes;

        xfer = pq->curr;
        info = xfer->xroot;

        mtx_assert(info->xfer_mtx, MA_OWNED);

        if (xfer->error) {
                /* some error happened */
                USB_BUS_LOCK(info->bus);
                usbd_transfer_done(xfer, 0);
                USB_BUS_UNLOCK(info->bus);
                return;
        }
        if (!xfer->flags_int.bdma_setup) {
                struct usb_page *pg;
                usb_frlength_t frlength_0;
                uint8_t isread;

                xfer->flags_int.bdma_setup = 1;

                /* reset BUS-DMA load state */

                info->dma_error = 0;

                if (xfer->flags_int.isochronous_xfr) {
                        /* only one frame buffer */
                        nframes = 1;
                        frlength_0 = xfer->sumlen;
                } else {
                        /* can be multiple frame buffers */
                        nframes = xfer->nframes;
                        frlength_0 = xfer->frlengths[0];
                }

                /*
                 * Set DMA direction first. This is needed to
                 * select the correct cache invalidate and cache
                 * flush operations.
                 */
                isread = USB_GET_DATA_ISREAD(xfer);
                pg = xfer->dma_page_ptr;

                if (xfer->flags_int.control_xfr &&
                    xfer->flags_int.control_hdr) {
                        /* special case */
                        if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
                                /* The device controller writes to memory */
                                xfer->frbuffers[0].isread = 1;
                        } else {
                                /* The host controller reads from memory */
                                xfer->frbuffers[0].isread = 0;
                        }
                } else {
                        /* default case */
                        xfer->frbuffers[0].isread = isread;
                }

                /*
                 * Setup the "page_start" pointer which points to an array of
                 * USB pages where information about the physical address of a
                 * page will be stored. Also initialise the "isread" field of
                 * the USB page caches.
                 */
                xfer->frbuffers[0].page_start = pg;

                info->dma_nframes = nframes;
                info->dma_currframe = 0;
                info->dma_frlength_0 = frlength_0;

                pg += (frlength_0 / USB_PAGE_SIZE);
                pg += 2;

                while (--nframes > 0) {
                        xfer->frbuffers[nframes].isread = isread;
                        xfer->frbuffers[nframes].page_start = pg;

                        pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
                        pg += 2;
                }
        }
        if (info->dma_error) {
                USB_BUS_LOCK(info->bus);
                usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
                USB_BUS_UNLOCK(info->bus);
                return;
        }
        if (info->dma_currframe != info->dma_nframes) {

                if (info->dma_currframe == 0) {
                        /* special case */
                        usb_pc_load_mem(xfer->frbuffers,
                            info->dma_frlength_0, 0);
                } else {
                        /* default case */
                        nframes = info->dma_currframe;
                        usb_pc_load_mem(xfer->frbuffers + nframes,
                            xfer->frlengths[nframes], 0);
                }

                /* advance frame index */
                info->dma_currframe++;

                return;
        }
        /* go ahead */
        usb_bdma_pre_sync(xfer);

        /* start loading next USB transfer, if any */
        usb_command_wrapper(pq, NULL);

        /* finally start the hardware */
        usbd_pipe_enter(xfer);
}

/*------------------------------------------------------------------------*
 * usb_bdma_done_event
 *
 * This function is called when the BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
        struct usb_xfer_root *info;

        info = USB_DMATAG_TO_XROOT(udpt);

        mtx_assert(info->xfer_mtx, MA_OWNED);

        /* copy error */
        info->dma_error = udpt->dma_error;

        /* enter workloop again */
        usb_command_wrapper(&info->dma_q,
            info->dma_q.curr);
}

/*------------------------------------------------------------------------*
 * usb_bdma_pre_sync
 *
 * This function handles DMA synchronisation that must be done before
 * an USB transfer is started.
 *------------------------------------------------------------------------*/
void
usb_bdma_pre_sync(struct usb_xfer *xfer)
{
        struct usb_page_cache *pc;
        usb_frcount_t nframes;

        if (xfer->flags_int.isochronous_xfr) {
                /* only one frame buffer */
                nframes = 1;
        } else {
                /* can be multiple frame buffers */
                nframes = xfer->nframes;
        }

        pc = xfer->frbuffers;

        while (nframes--) {

                if (pc->isread) {
                        usb_pc_cpu_invalidate(pc);
                } else {
                        usb_pc_cpu_flush(pc);
                }
                pc++;
        }
}

/*------------------------------------------------------------------------*
 * usb_bdma_post_sync
 *
 * This function handles DMA synchronisation that must be done after
 * an USB transfer is complete.
 *------------------------------------------------------------------------*/
void
usb_bdma_post_sync(struct usb_xfer *xfer)
{
        struct usb_page_cache *pc;
        usb_frcount_t nframes;

        if (xfer->flags_int.isochronous_xfr) {
                /* only one frame buffer */
                nframes = 1;
        } else {
                /* can be multiple frame buffers */
                nframes = xfer->nframes;
        }

        pc = xfer->frbuffers;

        while (nframes--) {
                if (pc->isread) {
                        usb_pc_cpu_invalidate(pc);
                }
                pc++;
        }
}
#endif
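
/*------------------------------------------------------------------------*
 * Example usage - illustrative sketch only, not part of the original
 * file. It shows how a caller might allocate a DMA-able page cache with
 * the helpers above, copy data into it, synchronise the CPU cache and
 * read the data back. The function name, buffer sizes and variables
 * below are hypothetical, and the block is compiled out.
 *------------------------------------------------------------------------*/
#if 0
static void
example_page_cache_usage(void)
{
        struct usb_page_cache pc;
        struct usb_page pg;
        uint8_t data[4] = { 1, 2, 3, 4 };
        uint8_t copy[4];

        memset(&pc, 0, sizeof(pc));
        memset(&pg, 0, sizeof(pg));

        /* allocate 64 bytes of DMA-able memory, aligned to 64 bytes */
        if (usb_pc_alloc_mem(&pc, &pg, 64, 64) != 0)
                return;                 /* allocation failed */

        /* copy data into the buffer and make it visible to the device */
        usbd_copy_in(&pc, 0, data, sizeof(data));
        usb_pc_cpu_flush(&pc);

        /* after the device has updated the buffer, read it back */
        usb_pc_cpu_invalidate(&pc);
        usbd_copy_out(&pc, 0, copy, sizeof(copy));

        /* release the DMA-able memory */
        usb_pc_free_mem(&pc);
}
#endif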