// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */

#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/cacheflush.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

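/*
 * The OpenRISC dcache is not coherent with DMA, so two strategies are
 * used below: coherent allocations are remapped cache-inhibited by
 * setting the _PAGE_CI bit on every PTE in the buffer, while streaming
 * mappings are kept coherent by explicit dcache flush/invalidate in
 * arch_sync_dma_for_device().
 */
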
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* Flush page out of dcache */
	local_dcache_range_flush(__pa(addr), __pa(next));

	return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry = page_set_nocache,
};

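/*
 * Note: unlike page_set_nocache(), no dcache flush is needed here; while
 * the page was marked cache-inhibited it cannot have gathered any dirty
 * dcache lines, so dropping the stale TLB entry is sufficient.
 */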
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry = page_clear_nocache,
};

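/*
 * Called by the generic dma-direct code (CONFIG_ARCH_HAS_DMA_SET_UNCACHED)
 * once it has allocated pages for a coherent buffer. A simplified sketch
 * of the caller's side, for illustration only:
 *
 *	ret = page_address(page);
 *	ret = arch_dma_set_uncached(ret, size);
 *	if (IS_ERR(ret))
 *		goto out_free_pages;
 */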
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	int error;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	mmap_write_lock(&init_mm);
	error = walk_page_range_novma(&init_mm, va, va + size,
				      &set_nocache_walk_ops, NULL, NULL);
	mmap_write_unlock(&init_mm);

	if (error)
		return ERR_PTR(error);
	return cpu_addr;
}

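/*
 * The inverse of arch_dma_set_uncached(): make the range cacheable again
 * when the coherent buffer is freed. The pages are still mapped at this
 * point, so the walk is expected to succeed; hence the WARN_ON() rather
 * than error handling.
 */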
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;

	mmap_write_lock(&init_mm);
	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range_novma(&init_mm, va, va + size,
				      &clear_nocache_walk_ops, NULL, NULL));
	mmap_write_unlock(&init_mm);
}

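/*
 * Sync a streaming mapping before the device touches it, e.g. on the
 * dma_map_single()/dma_map_page() path. Illustrative driver-side flow
 * (buffer names hypothetical):
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *		-> arch_sync_dma_for_device(virt_to_phys(buf), len,
 *					    DMA_TO_DEVICE);
 */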
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
			      enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		local_dcache_range_flush(addr, addr + size);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		local_dcache_range_inv(addr, addr + size);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}