/*
 *  linux/arch/arm/mm/proc-arm1022.S: MMU functions for ARM1022E
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is the low-level assembler for performing cache and TLB
 * functions on the ARM1022E.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768
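/*
 * With the geometry above, the data cache holds
 * CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE = 16 * 64 * 32
 * = 32768 bytes, so CACHE_DLIMIT corresponds to the full cache size.
 * The whole-cache loops below walk this geometry using the index
 * format of "mcr p15, 0, rX, c7, c14, 2": the line index lives in
 * bits [31:26] and the segment number in bits [8:5], hence the
 * "<< 26" and "<< 5" shifts used there.
 */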
	.text
/*
 * cpu_arm1022_proc_init()
 */
ENTRY(cpu_arm1022_proc_init)
	mov	pc, lr

/*
 * cpu_arm1022_proc_fin()
 */
ENTRY(cpu_arm1022_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_arm1022_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm1022_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm1022_do_idle()
 */
	.align	5
ENTRY(cpu_arm1022_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/* ================================= CACHE ================================ */

	.align	5

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm1022_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
#endif
	mov	pc, lr
ENDPROC(arm1022_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(arm1022_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm1022_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 to 0
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags for this space
 */
ENTRY(arm1022_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1022_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1022_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(arm1022_flush_kern_dcache_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm1022_dma_inv_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm1022_dma_clean_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1022_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
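/*
 * dma_map_area() below dispatches to one of the three range
 * operations above based on its direction argument.  With the
 * generic kernel values DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1
 * and DMA_FROM_DEVICE = 2, the "cmp r2, #DMA_TO_DEVICE" sequence
 * selects: equal -> clean only (CPU wrote, device reads), higher
 * (DMA_FROM_DEVICE) -> invalidate only (device writes, CPU reads),
 * lower (DMA_BIDIRECTIONAL) -> clean+invalidate.
 */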
/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm1022_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm1022_dma_clean_range
	bcs	arm1022_dma_inv_range
	b	arm1022_dma_flush_range
ENDPROC(arm1022_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm1022_dma_unmap_area)
	mov	pc, lr
ENDPROC(arm1022_dma_unmap_area)

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm1022

	.align	5
ENTRY(cpu_arm1022_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	ip, #0
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mov	pc, lr

/* =============================== PageTable ============================== */
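/*
 * The data cache on this core is virtually addressed, so its
 * contents are only meaningful for the current page tables.  That is
 * why cpu_arm1022_switch_mm() below cleans and invalidates the whole
 * D cache, invalidates the I cache and drains the write buffer
 * before it installs the new page table pointer and invalidates the
 * TLBs.
 */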
/*
 * cpu_arm1022_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm1022_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 to 0
#endif
	mov	r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mov	pc, lr

/*
 * cpu_arm1022_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm1022_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#endif /* CONFIG_MMU */
	mov	pc, lr

	__CPUINIT

	.type	__arm1022_setup, #function
__arm1022_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, arm1022_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .R..............
#endif
	mov	pc, lr
	.size	__arm1022_setup, . - __arm1022_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..11 0101
	 *
	 */
	.type	arm1022_crval, #object
arm1022_crval:
	crval	clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930

	__INITDATA
	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm1022, dabort=v4t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_arm1022_name, "ARM1022"

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm1022_proc_info,#object
__arm1022_proc_info:
	.long	0x4105a220			@ ARM 1022E (v5TE)
	.long	0xff0ffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm1022_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
	.long	cpu_arm1022_name
	.long	arm1022_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm1022_cache_fns
	.size	__arm1022_proc_info, . - __arm1022_proc_info
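/*
 * For reference: the value/mask pair at the top of
 * __arm1022_proc_info (0x4105a220 / 0xff0ffff0) matches the CP15
 * main ID register with implementer 0x41 (ARM Ltd), architecture 5
 * (v5TE) and part number 0xA22, while ignoring the variant and
 * revision fields.
 */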