/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003 by Ralf Baechle
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);
EXPORT_SYMBOL(_dma_cache_wback);
EXPORT_SYMBOL(_dma_cache_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems to be a very atypical use ...
 */
asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

/*
 * Write a page's dcache lines back through its kernel mapping.  If the
 * page belongs to a mapping that is not currently mapped into any user
 * address space, no user alias can exist yet, so just mark the page
 * dcache-dirty and defer the flush until the page is faulted in.
 */
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

/*
 * Called when a PTE is installed: if the page carries deferred dirty
 * dcache state and the new user mapping may alias the kernel mapping,
 * flush the kernel mapping now, then clear the deferred-dirty flag.
 */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
	    Page_dcache_dirty(page)) {
		if (pages_do_alias((unsigned long)page_address(page),
		                   address & PAGE_MASK)) {
			addr = (unsigned long) page_address(page);
			flush_data_cache_page(addr);
		}

		ClearPageDcacheDirty(page);
	}
}

#define __weak __attribute__((weak))

static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";

/*
 * Pick the cache flavour for the CPU we are running on and let its init
 * routine install the flush functions into the pointers above.  The init
 * routines are declared weak so kernels built without a given flavour
 * still link; the call is optimized away when the corresponding
 * cpu_has_* test is compile-time false.
 */
void __init cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
		return;
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
		return;
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
		return;
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
		return;
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
		return;
	}
	if (cpu_has_sb1_cache) {
		extern void __weak sb1_cache_init(void);

		sb1_cache_init();
		return;
	}

	panic(cache_panic);
}
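
/*
 * Usage sketch (illustrative only, not part of this translation unit):
 * userspace reaches sys_cacheflush() above via the MIPS cacheflush(2)
 * wrapper from <sys/cachectl.h>.  As noted above sys_cacheflush(), the
 * cache argument is currently treated as BCACHE regardless of its value.
 * The helper name, buffer and length below are assumptions made up for
 * the example, e.g. code emitted by a JIT before it is executed:
 *
 *	#include <stdio.h>
 *	#include <sys/cachectl.h>
 *
 *	// Hypothetical helper: make freshly written instructions visible
 *	// to instruction fetch before jumping to them.
 *	int flush_jit_buffer(void *buf, int nbytes)
 *	{
 *		if (cacheflush(buf, nbytes, BCACHE) < 0) {
 *			perror("cacheflush");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */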