/*
 *  linux/arch/arm/mm/proc-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Modified by Catalin Marinas for noMMU support
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/arm_scu.h>
#include <asm/procinfo.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#define D_CACHE_LINE_SIZE	32

/* Translation Table Base register (CP15 c2) attribute bits */
#define TTB_C		(1 << 0)	/* page table walks cacheable */
#define TTB_S		(1 << 1)	/* page table walks shareable */
#define TTB_IMP		(1 << 2)	/* implementation defined */
#define TTB_RGN_NC	(0 << 3)	/* outer non-cacheable */
#define TTB_RGN_WBWA	(1 << 3)	/* outer write-back, write-allocate */
#define TTB_RGN_WT	(2 << 3)	/* outer write-through */
#define TTB_RGN_WB	(3 << 3)	/* outer write-back */

/*
 * cpu_v6_proc_init()
 *
 * Nothing to do here - all per-CPU initialisation for ARMv6 is
 * performed by __v6_setup before the MMU is turned on.
 */
ENTRY(cpu_v6_proc_init)
	mov	pc, lr

/*
 * cpu_v6_proc_fin()
 *
 * Prepare the CPU for shutdown/reset: with interrupts masked, flush
 * the caches, then disable the I-cache, D-cache and alignment checking
 * in the CP15 control register.
 */
ENTRY(cpu_v6_proc_fin)
	stmfd	sp!, {lr}
	cpsid	if				@ disable interrupts
	bl	v6_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............ (I-cache enable)
	bic	r0, r0, #0x0006			@ .............ca. (D-cache, alignment)
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_v6_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * - loc   - location to jump to for soft reset
 *
 * It is assumed that:
 */
	.align	5
ENTRY(cpu_v6_reset)
	mov	pc, r0

/*
 * cpu_v6_do_idle()
 *
 * Idle the processor (eg, wait for interrupt).
 *
 * IRQs are already disabled.
 */
ENTRY(cpu_v6_do_idle)
	mcr	p15, 0, r1, c7, c0, 4		@ wait for interrupt (register value is ignored by WFI)
	mov	pc, lr

/*
 * cpu_v6_dcache_clean_area(addr, size)
 *
 * Clean the D-cache over the given region, one cache line at a time,
 * so the data is visible below the L1 cache.  Compiled out entirely
 * when the TLB can fetch page table walks from the L1 D-cache.
 *
 * - r0 - virtual start address (assumed cache-line aligned)
 * - r1 - size in bytes
 */
ENTRY(cpu_v6_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #D_CACHE_LINE_SIZE
	subs	r1, r1, #D_CACHE_LINE_SIZE
	bhi	1b
#endif
	mov	pc, lr

/*
 * cpu_v6_switch_mm(pgd_phys, tsk)
 *
 * Set the translation table base pointer to be pgd_phys
 *
 * - pgd_phys - physical address of new TTB
 * - tsk      - the mm; mm->context.id is loaded into the context ID register
 *
 * It is assumed that:
 * - we are not using split page tables
 */
ENTRY(cpu_v6_switch_mm)
#ifdef CONFIG_MMU
	mov	r2, #0
	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
#ifdef CONFIG_SMP
	orr	r0, r0, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
#endif
	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
#endif
	mov	pc, lr

/*
 * cpu_v6_set_pte(ptep, pte)
 *
 * Set a level 2 translation table entry.
 *
 * - ptep  - pointer to level 2 translation table entry
 *	     (hardware version is stored at -2048 bytes)
 * - pte   - PTE value to store
 *
 * The hardware PTE is derived from the Linux PTE bits as follows:
 *
 * Permissions:
 *  YUWD  APX AP1 AP0	SVC	User
 *  0xxx   0   0   0	no acc	no acc
 *  100x   1   0   1	r/o	no acc
 *  10x0   1   0   1	r/o	no acc
 *  1011   0   0   1	r/w	no acc
 *  110x   0   1   0	r/w	r/o
 *  11x0   0   1   0	r/w	r/o
 *  1111   0   1   1	r/w	r/w
 */
ENTRY(cpu_v6_set_pte)
#ifdef CONFIG_MMU
	str	r1, [r0], #-2048		@ linux version; step back to hardware entry

	bic	r2, r1, #0x000003f0		@ strip Linux status bits
	bic	r2, r2, #0x00000003		@ strip type bits
	orr	r2, r2, #PTE_EXT_AP0 | 2	@ extended small page, SVC access

	tst	r1, #L_PTE_WRITE
	tstne	r1, #L_PTE_DIRTY
	orreq	r2, r2, #PTE_EXT_APX		@ read-only unless writable AND dirty

	tst	r1, #L_PTE_USER
	orrne	r2, r2, #PTE_EXT_AP1		@ grant user access
	tstne	r2, #PTE_EXT_APX
	bicne	r2, r2, #PTE_EXT_APX | PTE_EXT_AP0	@ user r/o implies kernel r/o

	tst	r1, #L_PTE_YOUNG
	biceq	r2, r2, #PTE_EXT_APX | PTE_EXT_AP_MASK	@ not young: remove all access

	tst	r1, #L_PTE_EXEC
	orreq	r2, r2, #PTE_EXT_XN		@ execute-never unless executable

	tst	r1, #L_PTE_PRESENT
	moveq	r2, #0				@ not present: invalid hardware entry

	str	r2, [r0]
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte (clean D line holding the entry)
#endif
	mov	pc, lr




cpu_v6_name:
	.asciz	"Some Random V6 Processor"
	.align

	.section ".text.init", #alloc, #execinstr

/*
 * __v6_setup
 *
 * Initialise TLB, Caches, and MMU state ready to switch the MMU
 * on.  Return in r0 the new CP15 C1 control register setting.
 *
 * We automatically detect if we have a Harvard cache, and use the
 * Harvard cache control instructions instead of the unified cache
 * control instructions.
 *
 * This should be able to cover all ARMv6 cores.
 *
 * It is assumed that:
 * - cache type register is implemented
 * - on SMP/MMU kernels, r4 holds the TTB1 value to load
 *   (NOTE(review): r4 is read but never set here - presumably set up
 *   by the caller in head.S; confirm before relying on it)
 */
__v6_setup:
#ifdef CONFIG_SMP
	/* Set up the SCU on core 0 only */
	mrc	p15, 0, r0, c0, c0, 5		@ CPU core number
	ands	r0, r0, #15
	moveq	r0, #0x10000000			@ SCU_BASE
	orreq	r0, r0, #0x00100000
	ldreq	r5, [r0, #SCU_CTRL]
	orreq	r5, r5, #1			@ set SCU enable bit
	streq	r5, [r0, #SCU_CTRL]

#ifndef CONFIG_CPU_DCACHE_DISABLE
	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
	orr	r0, r0, #0x20
	mcr	p15, 0, r0, c1, c0, 1
#endif
#endif

	mov	r0, #0
	mcr	p15, 0, r0, c7, c14, 0		@ clean+invalidate D cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c15, 0		@ clean+invalidate cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
#ifdef CONFIG_SMP
	orr	r4, r4, #TTB_RGN_WBWA|TTB_S	@ mark PTWs shared, outer cacheable
#endif
	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1 (r4 set by caller)
#endif /* CONFIG_MMU */
#ifdef CONFIG_VFP
	mrc	p15, 0, r0, c1, c0, 2
	orr	r0, r0, #(0xf << 20)		@ cp10/cp11 full access
	mcr	p15, 0, r0, c1, c0, 2		@ Enable full access to VFP
#endif
	adr	r5, v6_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0, 0		@ read control register
	bic	r0, r0, r5			@ clear the bits we don't want
	orr	r0, r0, r6			@ set them
	mov	pc, lr				@ return to head.S:__ret

	/*
	 *         V X F   I D LR
	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
	 *         0 110       0011 1.00 .111 1101 < we want
	 */
	.type	v6_crval, #object
v6_crval:
	crval	clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c

	/* Processor operations table referenced by the proc_info record below */
	.type	v6_processor_functions, #object
ENTRY(v6_processor_functions)
	.word	v6_early_abort
	.word	cpu_v6_proc_init
	.word	cpu_v6_proc_fin
	.word	cpu_v6_reset
	.word	cpu_v6_do_idle
	.word	cpu_v6_dcache_clean_area
	.word	cpu_v6_switch_mm
	.word	cpu_v6_set_pte
	.size	v6_processor_functions, . - v6_processor_functions

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv6"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v6"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.section ".proc.info.init", #alloc, #execinstr

	/*
	 * Match any ARMv6 processor core.
	 */
	.type	__v6_proc_info, #object
__v6_proc_info:
	.long	0x0007b000			@ CPU ID value
	.long	0x0007f000			@ CPU ID mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_XN | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__v6_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
	.long	cpu_v6_name
	.long	v6_processor_functions
	.long	v6wbi_tlb_fns
	.long	v6_user_fns
	.long	v6_cache_fns
	.size	__v6_proc_info, . - __v6_proc_info