/*
 * arch/sh/mm/cache-sh7705.c
 *
 * Copyright (C) 1999, 2000 Niibe Yutaka
 * Copyright (C) 2004 Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The 32KB cache on the SH7705 suffers from the same synonym problem
 * as SH4 CPUs.
 */
static inline void cache_wback_all(void)
{
	unsigned long ways, waysize, addrstart;

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;
			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;

			data = ctrl_inl(addr);

			/*
			 * Clear the U and V bits of every line that is both
			 * valid and dirty, so that it is written back and
			 * invalidated.
			 */
			if ((data & v) == v)
				ctrl_outl(data & ~v, addr);
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and the routine for the
 * a.out format.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_wback_region((void *)start, end - start);
}

/*
 * Write back and invalidate the D-cache of the page.
 */
static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
{
	unsigned long ways, waysize, addrstart;
	unsigned long flags;

	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within the 4k page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However this function exists to deal with
	 * potential cache aliasing, therefore the optimisation is probably not
	 * possible.
	 */
	local_irq_save(flags);
	jump_to_uncached();

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;

			data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
			if (data == phys) {
				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				ctrl_outl(data, addr);
			}
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back and invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(PHYSADDR(page_address(page)));
}

void __uses_jump_to_uncached flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();

	cache_wback_all();
	back_to_cached();
	local_irq_restore(flags);
}

void flush_cache_mm(struct mm_struct *mm)
{
	/* Is there any good way? */
	/* XXX: possibly call flush_cache_range for each vm area */
	flush_cache_all();
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * We could call flush_cache_page for the pages in this range,
	 * but it's not efficient (it would scan the caches all the time...).
	 *
	 * We can't use A-bit magic, as there's the case where we don't
	 * have a valid entry in the TLB.
	 */
	flush_cache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDRESS: Virtual Address (U0 address)
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	__flush_dcache_page(pfn << PAGE_SHIFT);
}

/*
 * This is called when a page-cache page is about to be mapped into a
 * user process' address space. It offers an opportunity for a
 * port to ensure d-cache/i-cache coherency if necessary.
 *
 * Not entirely sure why this is necessary on SH3 with a 32K cache, but
 * without it we get occasional "Memory fault" errors when loading a
 * program.
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	__flush_purge_region(page_address(page), PAGE_SIZE);
}