// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <asm/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    (tlb_type != hypervisor ? iommu_flushall : NULL),
			    false, 1, false);

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->tbl.map);
	iommu->tbl.map = NULL;

	return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct device *dev,
				    struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	unsigned long order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->tbl.table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 unsigned long attrs)
{
	struct iommu *iommu;
	unsigned long order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

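	/* Return the IOMMU TSB range covering this buffer to the allocator. */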
	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys,
				  size_t sz, enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	if (unlikely(attrs & DMA_ATTR_MMIO))
		/*
		 * This check is included because older versions of the code
		 * lacked MMIO path support, and my ability to test this path
		 * is limited. However, from a software technical standpoint,
		 * there is no restriction, as the following code operates
		 * solely on physical addresses.
		 */
		goto bad_no_ctx;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(phys_to_virt(phys));
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	base = alloc_npages(dev, iommu, npages);
	spin_lock_irqsave(&iommu->lock, flags);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	phys &= IO_PAGE_MASK;

	for (i = 0; i < npages; i++, base++, phys += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | phys;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;
}

static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
374 */ 375 if (direction == DMA_TO_DEVICE) 376 return; 377 378 STC_FLUSHFLAG_INIT(strbuf); 379 iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa); 380 (void) iommu_read(iommu->write_complete_reg); 381 382 limit = 100000; 383 while (!STC_FLUSHFLAG_SET(strbuf)) { 384 limit--; 385 if (!limit) 386 break; 387 udelay(1); 388 rmb(); 389 } 390 if (!limit) 391 printk(KERN_WARNING "strbuf_flush: flushflag timeout " 392 "vaddr[%08x] ctx[%lx] npages[%ld]\n", 393 vaddr, ctx, npages); 394 } 395 396 static void dma_4u_unmap_phys(struct device *dev, dma_addr_t bus_addr, 397 size_t sz, enum dma_data_direction direction, 398 unsigned long attrs) 399 { 400 struct iommu *iommu; 401 struct strbuf *strbuf; 402 iopte_t *base; 403 unsigned long flags, npages, ctx, i; 404 405 if (unlikely(direction == DMA_NONE)) { 406 if (printk_ratelimit()) 407 WARN_ON(1); 408 return; 409 } 410 411 iommu = dev->archdata.iommu; 412 strbuf = dev->archdata.stc; 413 414 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 415 npages >>= IO_PAGE_SHIFT; 416 base = iommu->page_table + 417 ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); 418 bus_addr &= IO_PAGE_MASK; 419 420 spin_lock_irqsave(&iommu->lock, flags); 421 422 /* Record the context, if any. */ 423 ctx = 0; 424 if (iommu->iommu_ctxflush) 425 ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; 426 427 /* Step 1: Kick data out of streaming buffers if necessary. */ 428 if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 429 strbuf_flush(strbuf, iommu, bus_addr, ctx, 430 npages, direction); 431 432 /* Step 2: Clear out TSB entries. */ 433 for (i = 0; i < npages; i++) 434 iopte_make_dummy(iommu, base + i); 435 436 iommu_free_ctx(iommu, ctx); 437 spin_unlock_irqrestore(&iommu->lock, flags); 438 439 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); 440 } 441 442 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, 443 int nelems, enum dma_data_direction direction, 444 unsigned long attrs) 445 { 446 struct scatterlist *s, *outs, *segstart; 447 unsigned long flags, handle, prot, ctx; 448 dma_addr_t dma_next = 0, dma_addr; 449 unsigned int max_seg_size; 450 unsigned long seg_boundary_size; 451 int outcount, incount, i; 452 struct strbuf *strbuf; 453 struct iommu *iommu; 454 unsigned long base_shift; 455 456 BUG_ON(direction == DMA_NONE); 457 458 iommu = dev->archdata.iommu; 459 strbuf = dev->archdata.stc; 460 if (nelems == 0 || !iommu) 461 return -EINVAL; 462 463 spin_lock_irqsave(&iommu->lock, flags); 464 465 ctx = 0; 466 if (iommu->iommu_ctxflush) 467 ctx = iommu_alloc_ctx(iommu); 468 469 if (strbuf->strbuf_enabled) 470 prot = IOPTE_STREAMING(ctx); 471 else 472 prot = IOPTE_CONSISTENT(ctx); 473 if (direction != DMA_TO_DEVICE) 474 prot |= IOPTE_WRITE; 475 476 outs = s = segstart = &sglist[0]; 477 outcount = 1; 478 incount = nelems; 479 handle = 0; 480 481 /* Init first segment length for backout at failure */ 482 outs->dma_length = 0; 483 484 max_seg_size = dma_get_max_seg_size(dev); 485 seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT); 486 base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; 487 for_each_sg(sglist, s, nelems, i) { 488 unsigned long paddr, npages, entry, out_entry = 0, slen; 489 iopte_t *base; 490 491 slen = s->length; 492 /* Sanity check */ 493 if (slen == 0) { 494 dma_next = 0; 495 continue; 496 } 497 /* Allocate iommu entries for that segment */ 498 paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); 499 npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); 
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);

			entry = (vaddr - iommu->tbl.table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);

			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return -EINVAL;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
596 */ 597 static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) 598 { 599 unsigned long ctx = 0; 600 601 if (iommu->iommu_ctxflush) { 602 iopte_t *base; 603 u32 bus_addr; 604 struct iommu_map_table *tbl = &iommu->tbl; 605 606 bus_addr = sg->dma_address & IO_PAGE_MASK; 607 base = iommu->page_table + 608 ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT); 609 610 ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; 611 } 612 return ctx; 613 } 614 615 static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, 616 int nelems, enum dma_data_direction direction, 617 unsigned long attrs) 618 { 619 unsigned long flags, ctx; 620 struct scatterlist *sg; 621 struct strbuf *strbuf; 622 struct iommu *iommu; 623 624 BUG_ON(direction == DMA_NONE); 625 626 iommu = dev->archdata.iommu; 627 strbuf = dev->archdata.stc; 628 629 ctx = fetch_sg_ctx(iommu, sglist); 630 631 spin_lock_irqsave(&iommu->lock, flags); 632 633 sg = sglist; 634 while (nelems--) { 635 dma_addr_t dma_handle = sg->dma_address; 636 unsigned int len = sg->dma_length; 637 unsigned long npages, entry; 638 iopte_t *base; 639 int i; 640 641 if (!len) 642 break; 643 npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); 644 645 entry = ((dma_handle - iommu->tbl.table_map_base) 646 >> IO_PAGE_SHIFT); 647 base = iommu->page_table + entry; 648 649 dma_handle &= IO_PAGE_MASK; 650 if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 651 strbuf_flush(strbuf, iommu, dma_handle, ctx, 652 npages, direction); 653 654 for (i = 0; i < npages; i++) 655 iopte_make_dummy(iommu, base + i); 656 657 iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, 658 IOMMU_ERROR_CODE); 659 sg = sg_next(sg); 660 } 661 662 iommu_free_ctx(iommu, ctx); 663 664 spin_unlock_irqrestore(&iommu->lock, flags); 665 } 666 667 static void dma_4u_sync_single_for_cpu(struct device *dev, 668 dma_addr_t bus_addr, size_t sz, 669 enum dma_data_direction direction) 670 { 671 struct iommu *iommu; 672 struct strbuf *strbuf; 673 unsigned long flags, ctx, npages; 674 675 iommu = dev->archdata.iommu; 676 strbuf = dev->archdata.stc; 677 678 if (!strbuf->strbuf_enabled) 679 return; 680 681 spin_lock_irqsave(&iommu->lock, flags); 682 683 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 684 npages >>= IO_PAGE_SHIFT; 685 bus_addr &= IO_PAGE_MASK; 686 687 /* Step 1: Record the context, if any. */ 688 ctx = 0; 689 if (iommu->iommu_ctxflush && 690 strbuf->strbuf_ctxflush) { 691 iopte_t *iopte; 692 struct iommu_map_table *tbl = &iommu->tbl; 693 694 iopte = iommu->page_table + 695 ((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT); 696 ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL; 697 } 698 699 /* Step 2: Kick data out of streaming buffers. */ 700 strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); 701 702 spin_unlock_irqrestore(&iommu->lock, flags); 703 } 704 705 static void dma_4u_sync_sg_for_cpu(struct device *dev, 706 struct scatterlist *sglist, int nelems, 707 enum dma_data_direction direction) 708 { 709 struct iommu *iommu; 710 struct strbuf *strbuf; 711 unsigned long flags, ctx, npages, i; 712 struct scatterlist *sg, *sgprv; 713 u32 bus_addr; 714 715 iommu = dev->archdata.iommu; 716 strbuf = dev->archdata.stc; 717 718 if (!strbuf->strbuf_enabled) 719 return; 720 721 spin_lock_irqsave(&iommu->lock, flags); 722 723 /* Step 1: Record the context, if any. 
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table + ((sglist[0].dma_address -
			tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4u_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;

	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

static const struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_phys		= dma_4u_map_phys,
	.unmap_phys		= dma_4u_unmap_phys,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
	.dma_supported		= dma_4u_supported,
};

const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);