/*
 *  linux/arch/arm/mm/proc-arm1026.S: MMU functions for ARM1026EJ-S
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * These are the low level assembler for performing cache and TLB
 * functions on the ARM1026EJ-S.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintainence instructions.
 */
#define CACHE_DLIMIT	32768

	.text
/*
 * cpu_arm1026_proc_init()
 *
 * Nothing to do at processor init time.
 */
ENTRY(cpu_arm1026_proc_init)
	mov	pc, lr

/*
 * cpu_arm1026_proc_fin()
 *
 * Prepare the CPU for reset/power-off: mask IRQ/FIQ, flush the
 * caches, then turn the I and D caches and write buffer off.
 */
ENTRY(cpu_arm1026_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip			@ disable IRQ+FIQ, stay in SVC
	bl	arm1026_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_arm1026_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm1026_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register (MMU/caches off)
	mov	pc, r0				@ jump to the reset location

/*
 * cpu_arm1026_do_idle()
 *
 * Idle the processor until the next interrupt (CP15 wait-for-interrupt).
 */
	.align	5
ENTRY(cpu_arm1026_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/* ================================= CACHE ================================ */

	.align	5
/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(arm1026_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm1026_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0				@ ip = 0 (SBZ operand for MCRs below)
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	@ "test, clean and invalidate" with r15 as the destination loads
	@ the result flags directly into the CPSR; loop until the whole
	@ D cache is clean and invalidated.
1:	mrc	p15, 0, r15, c7, c14, 3 	@ test, clean, invalidate
	bne	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags for this space
 */
ENTRY(arm1026_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ whole-cache flush is cheaper
	bhs	__flush_whole_cache		@ above this size

#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1026_coherent_kern_range)
	/* FALLTHROUGH */
/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1026_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align down to a cache line
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(arm1026_flush_kern_dcache_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm1026_dma_inv_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	@ Partial lines at either end must be cleaned first so the
	@ invalidate cannot discard unrelated dirty data sharing the line.
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm1026_dma_clean_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1026_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm1026_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	cmp	r2, #DMA_TO_DEVICE
	beq	arm1026_dma_clean_range		@ TO_DEVICE: clean only
	bcs	arm1026_dma_inv_range		@ FROM_DEVICE: invalidate only
	b	arm1026_dma_flush_range		@ BIDIRECTIONAL: clean+invalidate
ENDPROC(arm1026_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm1026_dma_unmap_area)
	mov	pc, lr				@ all maintenance done at map time
ENDPROC(arm1026_dma_unmap_area)

/*
 * Cache function vector; entry order must match struct cpu_cache_fns.
 */
ENTRY(arm1026_cache_fns)
	.long	arm1026_flush_kern_cache_all
	.long	arm1026_flush_user_cache_all
	.long	arm1026_flush_user_cache_range
	.long	arm1026_coherent_kern_range
	.long	arm1026_coherent_user_range
	.long	arm1026_flush_kern_dcache_area
	.long	arm1026_dma_map_area
	.long	arm1026_dma_unmap_area
	.long	arm1026_dma_flush_range

/*
 * cpu_arm1026_dcache_clean_area(addr, size)
 *
 * Clean (write back, no invalidate) the D cache over the given
 * kernel address range.
 *
 * - r0 - kernel start address
 * - r1 - size of region in bytes
 */
	.align	5
ENTRY(cpu_arm1026_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	ip, #0
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm1026_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm1026_switch_mm)
#ifdef CONFIG_MMU
	mov	r1, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	@ Flags come straight from the TCI op via r15; loop until clean.
1:	mrc	p15, 0, r15, c7, c14, 3 	@ test, clean, invalidate
	bne	1b
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mov	pc, lr

/*
 * cpu_arm1026_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm1026_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0				@ nop
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#endif /* CONFIG_MMU */
	mov	pc, lr


	__INIT

	.type	__arm1026_setup, #function
__arm1026_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
	mcr	p15, 0, r4, c2, c0		@ load page table pointer
#endif
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #4				@ explicitly disable writeback
	mcr	p15, 7, r0, c15, c0, 0
#endif
	adr	r5, arm1026_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000 		@ .R.. .... .... ....
#endif
	mov	pc, lr				@ return desired control reg in r0
	.size	__arm1026_setup, . - __arm1026_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..11 0101
	 * 
	 */
	.type	arm1026_crval, #object
arm1026_crval:
	crval	clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm1026_processor_functions, #object
arm1026_processor_functions:
	.word	v5t_early_abort
	.word	legacy_pabort
	.word	cpu_arm1026_proc_init
	.word	cpu_arm1026_proc_fin
	.word	cpu_arm1026_reset
	.word	cpu_arm1026_do_idle
	.word	cpu_arm1026_dcache_clean_area
	.word	cpu_arm1026_switch_mm
	.word	cpu_arm1026_set_pte_ext
	.size	arm1026_processor_functions, . - arm1026_processor_functions

	.section .rodata

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5tej"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name
	.align

	.type	cpu_arm1026_name, #object
cpu_arm1026_name:
	.asciz	"ARM1026EJ-S"
	.size	cpu_arm1026_name, . - cpu_arm1026_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm1026_proc_info,#object
__arm1026_proc_info:
	.long	0x4106a260			@ ARM 1026EJ-S (v5TEJ)
	.long	0xff0ffff0			@ CPU ID mask
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm1026_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
	.long	cpu_arm1026_name
	.long	arm1026_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm1026_cache_fns
	.size	__arm1026_proc_info, . - __arm1026_proc_info