--- pci-dma.c	(7f1501053811414ddeff63db8f5d41bdbe38068f)
+++ pci-dma.c	(c1f59375b3782f478ac2c488889abdc00dd8e25f)
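This diff converts the PA 1.1 code from a private dma_map_ops table (pa11_dma_ops) to the generic dma-noncoherent interface: arch_dma_alloc()/arch_dma_free() handle coherent allocations, arch_sync_dma_for_device()/arch_sync_dma_for_cpu() handle cache maintenance, and arch_dma_cache_sync() replaces the cache_sync op. The scatterlist and sync_single helpers are deleted outright, since the common code now derives them from the sync hooks.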
 // SPDX-License-Identifier: GPL-2.0
 /*
 ** PARISC 1.1 Dynamic DMA mapping support.
 ** This implementation is for PA-RISC platforms that do not support
 ** I/O TLBs (aka DMA address translation hardware).
 ** See Documentation/DMA-API-HOWTO.txt for interface definitions.
 **
 ** (c) Copyright 1999,2000 Hewlett-Packard Company

--- 7 unchanged lines hidden ---

 ** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
 **
 ** - ggg
 */

 #include <linux/init.h>
 #include <linux/gfp.h>
 #include <linux/mm.h>
-#include <linux/pci.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <linux/export.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>

 #include <asm/cacheflush.h>
 #include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
 #include <asm/io.h>
 #include <asm/page.h>		/* get_order */
 #include <asm/pgalloc.h>
 #include <linux/uaccess.h>
 #include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

--- 393 unchanged lines hidden ---

 	addr = (void *)__get_free_pages(flag, get_order(size));
 	if (addr)
 		*dma_handle = (dma_addr_t)virt_to_phys(addr);

 	return addr;
 }

-static void *pa11_dma_alloc(struct device *dev, size_t size,
+void *arch_dma_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {

 	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
 		return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
 	else
 		return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
 }

-static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	int order = get_order(size);

 	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
 		size = 1 << (order + PAGE_SHIFT);
 		unmap_uncached_pages((unsigned long)vaddr, size);
 		pcxl_free_range((unsigned long)vaddr, size);

 		vaddr = __va(dma_handle);
 	}
 	free_pages((unsigned long)vaddr, get_order(size));
 }

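The driver-facing API is unchanged by this conversion: a coherent buffer is still obtained through dma_alloc_coherent(), which the generic dma-noncoherent code now routes to arch_dma_alloc() above instead of through pa11_dma_ops.alloc. A minimal sketch of that driver-side view, assuming a hypothetical device pointer my_dev and buffer size:

#include <linux/dma-mapping.h>

#define EXAMPLE_BUF_SIZE 4096	/* hypothetical size, for illustration */

static int example_coherent(struct device *my_dev)
{
	dma_addr_t handle;
	void *cpu_addr;

	/* Ends up in arch_dma_alloc(): an uncached mapping on pcxl/pcxl2,
	 * plain pages from __get_free_pages() on the other PA 1.1 CPUs. */
	cpu_addr = dma_alloc_coherent(my_dev, EXAMPLE_BUF_SIZE, &handle,
				      GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand "handle" to the device, use "cpu_addr" from the CPU ... */

	/* Ends up in arch_dma_free(). */
	dma_free_coherent(my_dev, EXAMPLE_BUF_SIZE, cpu_addr, handle);
	return 0;
}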
-static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction, unsigned long attrs)
-{
-	void *addr = page_address(page) + offset;
-	BUG_ON(direction == DMA_NONE);
-
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		flush_kernel_dcache_range((unsigned long) addr, size);
-
-	return virt_to_phys(addr);
-}
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
+{
+	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+}

-static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	if (direction == DMA_TO_DEVICE)
-		return;
-
-	/*
-	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
-	 * simple map/unmap case. However, it IS necessary if
-	 * pci_dma_sync_single_* has been called and the buffer reused.
-	 */
-
-	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
-}
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
+{
+	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+}

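The removed direction and DMA_ATTR_SKIP_CPU_SYNC checks are not lost: in the generic dma-noncoherent code those decisions are made by the common map/unmap paths before the arch_sync_dma_for_*() hooks are invoked. From a driver, a streaming mapping looks the same as before; a minimal sketch, assuming a hypothetical my_dev and a kmalloc'ed buf:

#include <linux/dma-mapping.h>

static int example_streaming_rx(struct device *my_dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* The generic map path calls arch_sync_dma_for_device(), i.e. the
	 * dcache flush above, before the device may touch the buffer. */
	handle = dma_map_single(my_dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(my_dev, handle))
		return -ENOMEM;

	/* ... device DMAs into the buffer ... */

	/* For DMA_FROM_DEVICE the generic unmap path calls
	 * arch_sync_dma_for_cpu() so the CPU sees the DMAed data. */
	dma_unmap_single(my_dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}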
-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-		int nents, enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for_each_sg(sglist, sg, nents, i) {
-		unsigned long vaddr = (unsigned long)sg_virt(sg);
-
-		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
-		sg_dma_len(sg) = sg->length;
-
-		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-			continue;
-
-		flush_kernel_dcache_range(vaddr, sg->length);
-	}
-	return nents;
-}
-
-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-		int nents, enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	BUG_ON(direction == DMA_NONE);
-
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	if (direction == DMA_TO_DEVICE)
-		return;
-
-	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
-	for_each_sg(sglist, sg, nents, i)
-		flush_kernel_dcache_range(sg_virt(sg), sg->length);
-}
-
-static void pa11_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
-			size);
-}
-
-static void pa11_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
-			size);
-}
-
-static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
-{
-	int i;
-	struct scatterlist *sg;
-
-	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
-	for_each_sg(sglist, sg, nents, i)
-		flush_kernel_dcache_range(sg_virt(sg), sg->length);
-}
-
-static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
-{
-	int i;
-	struct scatterlist *sg;
-
-	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
-
-	for_each_sg(sglist, sg, nents, i)
-		flush_kernel_dcache_range(sg_virt(sg), sg->length);
-}
-
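All of the scatterlist and sync_single helpers above collapse into common code: once an architecture supplies arch_sync_dma_for_{device,cpu}(), the generic dma-direct/dma-noncoherent layer implements map_page, map_sg and the sync_* operations in terms of them. Roughly, per scatterlist entry, the generic layer does something like the following (a simplified sketch of the idea, not the exact kernel implementation):

#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/scatterlist.h>

/* Sketch: the generic per-entry work that replaces the hand-rolled
 * pa11_dma_map_sg() deleted above. */
static int generic_map_sg_sketch(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = phys_to_dma(dev, sg_phys(sg));
		sg_dma_len(sg) = sg->length;
		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			arch_sync_dma_for_device(dev, sg_phys(sg),
						 sg->length, dir);
	}
	return nents;
}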
-static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
 	flush_kernel_dcache_range((unsigned long)vaddr, size);
 }
-
-const struct dma_map_ops pa11_dma_ops = {
-	.alloc =			pa11_dma_alloc,
-	.free =				pa11_dma_free,
-	.map_page =			pa11_dma_map_page,
-	.unmap_page =			pa11_dma_unmap_page,
-	.map_sg =			pa11_dma_map_sg,
-	.unmap_sg =			pa11_dma_unmap_sg,
-	.sync_single_for_cpu =		pa11_dma_sync_single_for_cpu,
-	.sync_single_for_device =	pa11_dma_sync_single_for_device,
-	.sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
-	.sync_sg_for_device =		pa11_dma_sync_sg_for_device,
-	.cache_sync =			pa11_dma_cache_sync,
-};
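With the ops table gone, the entire PA 1.1 DMA surface is the five generic hooks implemented above. For reference, their prototypes as they appear in this diff (the declarations presumably come from the new dma-noncoherent/dma-mapping headers pulled in at the top of the file):

void *arch_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs);
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction);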