/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Common code for managing bounce pages for bus_dma backends.  As
 * this code currently assumes it can access internal members of
 * opaque types like bus_dma_tag_t and bus_dmamap, it is #include'd in
 * backends rather than being compiled standalone.
 *
 * Prerequisites:
 *
 * - M_BUSDMA malloc type
 * - struct bus_dmamap
 * - hw_busdma SYSCTL_NODE
 * - macros to access the following fields of bus_dma_tag_t:
 *   - dmat_alignment()
 *   - dmat_flags()
 *   - dmat_lowaddr()
 *   - dmat_lockfunc()
 *   - dmat_lockfuncarg()
 */
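/*
 * For illustration only: a backend whose tag carries these values in a
 * "common" member could satisfy the accessor-macro prerequisite with
 * definitions along the following lines.  The field names here are
 * hypothetical; each backend maps the macros onto its own tag layout.
 *
 *	#define	dmat_alignment(dmat)	((dmat)->common.alignment)
 *	#define	dmat_flags(dmat)	((dmat)->common.flags)
 *	#define	dmat_lowaddr(dmat)	((dmat)->common.lowaddr)
 *	#define	dmat_lockfunc(dmat)	((dmat)->common.lockfunc)
 *	#define	dmat_lockfuncarg(dmat)	((dmat)->common.lockfuncarg)
 */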
#include <sys/kthread.h>
#include <sys/sched.h>

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
#if defined(__amd64__) || defined(__i386__)
	vm_page_t	datapage[2];	/* physical page(s) of client data */
#else
	vm_page_t	datapage;	/* physical page of client data */
#endif
	vm_offset_t	dataoffs;	/* page offset of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
#ifdef dmat_domain
	int		domain;
#endif
	sbintime_t	total_deferred_time;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;

static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

static void busdma_thread(void *);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
	struct bounce_zone *bz;

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			map->pagesneeded = 0;
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			bz = dmat->bounce_zone;
			STAILQ_INSERT_TAIL(&bz->bounce_map_waitinglist, map,
			    links);
			map->queued_time = sbinuptime();
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}
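/*
 * Illustrative sketch (not part of this file) of how a backend's load
 * path typically consumes the result above, assuming it has already
 * counted the pages that must bounce into map->pagesneeded:
 *
 *	if (map->pagesneeded != 0) {
 *		error = _bus_dmamap_reserve_pages(dmat, map, flags);
 *		if (error != 0)
 *			return (error);
 *	}
 *
 * EINPROGRESS means the map was queued on the zone's waiting list and
 * the client callback will be invoked later from busdma_thread();
 * ENOMEM is only possible for BUS_DMA_NOWAIT requests.
 */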
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;
	bool start_thread;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat_alignment(dmat) <= bz->alignment) &&
#ifdef dmat_domain
		    dmat_domain(dmat) == bz->domain &&
#endif
		    (dmat_lowaddr(dmat) >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat_lowaddr(dmat);
	bz->alignment = MAX(dmat_alignment(dmat), PAGE_SIZE);
	bz->map_count = 0;
#ifdef dmat_domain
	bz->domain = dmat_domain(dmat);
#endif
	snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
	    (uintmax_t)bz->lowaddr);
	start_thread = STAILQ_EMPTY(&bounce_zone_list);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests (pages bounced)");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");
#ifdef dmat_domain
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "domain", CTLFLAG_RD, &bz->domain, 0,
	    "memory domain");
#endif
	SYSCTL_ADD_SBINTIME_USEC(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred_time", CTLFLAG_RD, &bz->total_deferred_time,
	    "Cumulative time busdma requests are deferred (us)");
	if (start_thread) {
		if (kproc_create(busdma_thread, NULL, NULL, 0, 0, "busdma") !=
		    0)
			printf("failed to create busdma thread\n");
	}
	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

#ifdef dmat_domain
		bpage = malloc_domainset(sizeof(*bpage), M_BUSDMA,
		    DOMAINSET_PREF(bz->domain), M_NOWAIT | M_ZERO);
#else
		bpage = malloc(sizeof(*bpage), M_BUSDMA, M_NOWAIT | M_ZERO);
#endif

		if (bpage == NULL)
			break;
#ifdef dmat_domain
		bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE,
		    M_BOUNCE, DOMAINSET_PREF(bz->domain), M_NOWAIT,
		    0ul, bz->lowaddr, PAGE_SIZE, 0);
#else
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
#endif
		if (bpage->vaddr == 0) {
			free(bpage, M_BUSDMA);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}
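/*
 * Illustrative sketch only: a backend's bus_dmamap_create()
 * implementation typically pairs the two functions above, creating the
 * zone on first use and growing the page pool toward the tag's
 * worst-case need ("maxpages" and the sizing policy are
 * backend-specific):
 *
 *	if (dmat->bounce_zone == NULL &&
 *	    (error = alloc_bounce_zone(dmat)) != 0)
 *		return (error);
 *	bz = dmat->bounce_zone;
 *	if (bz->total_bpages < maxpages)
 *		alloc_bounce_pages(dmat, maxpages - bz->total_bpages);
 *	bz->map_count++;
 */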
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

#if defined(__amd64__) || defined(__i386__)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size)
#else
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
#endif
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
#if defined(__amd64__) || defined(__i386__)
	KASSERT(map != &nobounce_dmamap, ("add_bounce_page: bad map %p", map));
#endif
#ifdef __riscv
	KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
	    ("add_bounce_page: bad map %p", map));
#endif

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
#if defined(__amd64__) || defined(__i386__)
		bpage->vaddr |= addr1 & PAGE_MASK;
		bpage->busaddr |= addr1 & PAGE_MASK;
		KASSERT(addr2 == 0,
	    ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
#else
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
#endif
	}
	bpage->datavaddr = vaddr;
#if defined(__amd64__) || defined(__i386__)
	bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
	KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
	bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
	bpage->dataoffs = addr1 & PAGE_MASK;
#else
	bpage->datapage = PHYS_TO_VM_PAGE(addr);
	bpage->dataoffs = addr & PAGE_MASK;
#endif
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;
	bool schedule_thread;
	u_int count;

	if (STAILQ_EMPTY(&map->bpages))
		return;

	bz = dmat->bounce_zone;
	count = 0;
	schedule_thread = false;
	STAILQ_FOREACH(bpage, &map->bpages, links) {
		bpage->datavaddr = 0;
		bpage->datacount = 0;

		if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) {
			/*
			 * Reset the bounce page to start at offset 0.
			 * Other uses of this bounce page may need to
			 * store a full page of data and/or assume it
			 * starts on a page boundary.
			 */
			bpage->vaddr &= ~PAGE_MASK;
			bpage->busaddr &= ~PAGE_MASK;
		}
		count++;
	}

	mtx_lock(&bounce_lock);
	STAILQ_CONCAT(&bz->bounce_page_list, &map->bpages);
	bz->free_bpages += count;
	bz->active_bpages -= count;
	while ((map = STAILQ_FIRST(&bz->bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) != 0)
			break;

		STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
		STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
		bz->total_deferred++;
		schedule_thread = true;
	}
	mtx_unlock(&bounce_lock);
	if (schedule_thread)
		wakeup(&bounce_map_callbacklist);
}
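/*
 * Illustrative note: a backend calls add_bounce_page() from its load
 * path for each segment the device cannot address directly and places
 * the returned bus address in the segment list; free_bounce_pages() is
 * the counterpart called from bus_dmamap_unload().  The data copy
 * itself happens in the backend's bus_dmamap_sync(); conceptually, for
 * a PREWRITE of a buffer that has a KVA mapping:
 *
 *	STAILQ_FOREACH(bpage, &map->bpages, links)
 *		bcopy((void *)bpage->datavaddr, (void *)bpage->vaddr,
 *		    bpage->datacount);
 */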
static void
busdma_thread(void *dummy __unused)
{
	STAILQ_HEAD(, bus_dmamap) callbacklist;
	bus_dma_tag_t dmat;
	struct bus_dmamap *map, *nmap;
	struct bounce_zone *bz;

	thread_lock(curthread);
	sched_class(curthread, PRI_ITHD);
	sched_ithread_prio(curthread, PI_SWI(SWI_BUSDMA));
	thread_unlock(curthread);
	for (;;) {
		mtx_lock(&bounce_lock);
		while (STAILQ_EMPTY(&bounce_map_callbacklist))
			mtx_sleep(&bounce_map_callbacklist, &bounce_lock, 0,
			    "-", 0);
		STAILQ_INIT(&callbacklist);
		STAILQ_CONCAT(&callbacklist, &bounce_map_callbacklist);
		mtx_unlock(&bounce_lock);

		STAILQ_FOREACH_SAFE(map, &callbacklist, links, nmap) {
			dmat = map->dmat;
			bz = dmat->bounce_zone;
			dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
			    BUS_DMA_LOCK);
			bz->total_deferred_time +=
			    (sbinuptime() - map->queued_time);
			bus_dmamap_load_mem(map->dmat, map, &map->mem,
			    map->callback, map->callback_arg, BUS_DMA_WAITOK);
			dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat),
			    BUS_DMA_UNLOCK);
		}
	}
}