/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define COPYPAGE_MINICACHE	0xffff8000

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_CACHEABLE)

#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)

static DEFINE_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_page
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
	asm volatile(
	"stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				\n\
	pld	[r0, #0]			\n\
	pld	[r0, #32]			\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r0, #64]			\n\
	pld	[r0, #96]			\n\
	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
2:	ldrd	r2, [r0], #8			\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	lr, lr, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				\n\
	ldmfd	sp!, {r4, r5, pc}		"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}

void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
	spin_lock(&minicache_lock);

	/*
	 * Map the source page at COPYPAGE_MINICACHE with the mini-cache
	 * page protection, and flush any stale TLB entry for that address.
	 */
	set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
	flush_tlb_kernel_page(COPYPAGE_MINICACHE);

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	spin_unlock(&minicache_lock);
}

/*
 * XScale optimised clear_user_page
 */
void __attribute__((naked))
xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
{
	asm volatile(
	"mov	r1, %0				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, r0				\n\
	strd	r2, [r0], #8			\n\
	strd	r2, [r0], #8			\n\
	strd	r2, [r0], #8			\n\
	strd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b				\n\
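	@ whole page written back and invalidated; D-cache left clean	\n\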
	mov	pc, lr"
	:
	: "I" (PAGE_SIZE / 32));
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_page	= xscale_mc_clear_user_page,
	.cpu_copy_user_page	= xscale_mc_copy_user_page,
};
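
/*
 * For illustration only: a plain C sketch of the data movement in
 * mc_copy_user_page above, assuming a 4096-byte page.  It mirrors the
 * loop structure (64 passes of 64 bytes each, matching the 63 counted
 * iterations plus the final "beq 2b" pass) but omits the prefetching,
 * the mini-cache mapping and the clean/invalidate cache maintenance.
 * The names below are hypothetical and not part of the kernel; the
 * block is compiled out.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096	/* assumed page size */

static void sketch_copy_page(void *to, const void *from)
{
	const uint64_t *src = from;
	uint64_t *dst = to;
	int blocks = SKETCH_PAGE_SIZE / 64;

	while (blocks--) {
		/*
		 * Two 32-byte halves per pass, like the ldrd/strd pairs;
		 * the real code cleans and then invalidates each 32-byte
		 * destination line between the halves.
		 */
		memcpy(dst, src, 32);
		memcpy(dst + 4, src + 4, 32);
		src += 8;	/* advance 64 bytes */
		dst += 8;
	}
}
#endif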