/*
 *  linux/arch/arm/mm/proc-arm1020e.S: MMU functions for ARM1020E
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * This is the low-level assembler for performing cache and TLB
 * functions on the ARM1020E.
 *
 *  CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we always use the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768
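
/*
 * Note: with the geometry above, the data cache holds
 * 16 segments x 64 lines x 32 bytes = 32768 bytes, which is why
 * CACHE_DLIMIT (and MAX_AREA_SIZE) are set to the size of the
 * whole data cache.
 */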

	.text
/*
 * cpu_arm1020e_proc_init()
 */
ENTRY(cpu_arm1020e_proc_init)
	mov	pc, lr

/*
 * cpu_arm1020e_proc_fin()
 */
ENTRY(cpu_arm1020e_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip
	bl	arm1020e_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_arm1020e_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm1020e_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm1020e_do_idle()
 */
	.align	5
ENTRY(cpu_arm1020e_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/* ================================= CACHE ================================ */

	.align	5
/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(arm1020e_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm1020e_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 to 0
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
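
/*
 * Note: the word written to c7, c14, 2 ("clean+invalidate D index")
 * above packs the cache line index into bits [31:26] and the segment
 * into bits [8:5], i.e. roughly
 *
 *	index_word = (entry << 26) | (segment << 5)
 *
 * so the two nested loops walk all 16 segments x 64 entries of the
 * data cache.  cpu_arm1020e_switch_mm below uses the same encoding.
 */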

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags for this space
 */
ENTRY(arm1020e_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1020e_coherent_kern_range)
	/* FALLTHROUGH */
/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1020e_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - page	- page aligned address
 */
ENTRY(arm1020e_flush_kern_dcache_page)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm1020e_dma_inv_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm1020e_dma_clean_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1020e_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

ENTRY(arm1020e_cache_fns)
	.long	arm1020e_flush_kern_cache_all
	.long	arm1020e_flush_user_cache_all
	.long	arm1020e_flush_user_cache_range
	.long	arm1020e_coherent_kern_range
	.long	arm1020e_coherent_user_range
	.long	arm1020e_flush_kern_dcache_page
	.long	arm1020e_dma_inv_range
	.long	arm1020e_dma_clean_range
	.long	arm1020e_dma_flush_range
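
/*
 * Note: the order of the entries in arm1020e_cache_fns above is
 * assumed to match struct cpu_cache_fns in <asm/cacheflush.h>; the
 * table is only ever indexed through that structure, so the two
 * must stay in step.
 */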

	.align	5
ENTRY(cpu_arm1020e_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	ip, #0
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm1020e_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm1020e_switch_mm)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
	mov	r1, #0xF			@ 16 segments
1:	mov	r3, #0x3F			@ 64 entries
2:	mov	ip, r3, LSL #26			@ shift up entry
	orr	ip, ip, r1, LSL #5		@ shift in/up index
	mcr	p15, 0, ip, c7, c14, 2		@ Clean & Inval DCache entry
	mov	ip, #0
	subs	r3, r3, #1
	cmp	r3, #0
	bge	2b				@ entries 3F to 0
	subs	r1, r1, #1
	cmp	r1, #0
	bge	1b				@ segments 15 to 0

#endif
	mov	r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, lr

/*
 * cpu_arm1020e_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm1020e_set_pte)
	str	r1, [r0], #-2048		@ linux version

	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL

	tst	r1, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW

	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	eor	r3, r1, #0x0a			@ C & small page?
	tst	r3, #0x0b
	biceq	r2, r2, #4
#endif
	str	r2, [r0]			@ hardware version
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mov	pc, lr
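
/*
 * Note on the layout assumed above: r0 enters pointing at the Linux
 * copy of the PTE, so "str r1, [r0], #-2048" stores that copy and
 * then steps r0 back 2048 bytes to the corresponding hardware PTE in
 * the same page (see the page table layout in <asm/pgtable.h>).  The
 * translated hardware value in r2 is written there and cleaned from
 * the D cache so the update reaches memory for the hardware table
 * walk.
 */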

	__INIT

	.type	__arm1020e_setup, #function
__arm1020e_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	ldr	r5, arm1020e_cr1_clear
	bic	r0, r0, r5
	ldr	r5, arm1020e_cr1_set
	orr	r0, r0, r5
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .R.. .... .... ....
#endif
	mov	pc, lr
	.size	__arm1020e_setup, . - __arm1020e_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..11 0101
	 */
	.type	arm1020e_cr1_clear, #object
	.type	arm1020e_cr1_set, #object
arm1020e_cr1_clear:
	.word	0x5f3f
arm1020e_cr1_set:
	.word	0x3935

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm1020e_processor_functions, #object
arm1020e_processor_functions:
	.word	v4t_early_abort
	.word	cpu_arm1020e_proc_init
	.word	cpu_arm1020e_proc_fin
	.word	cpu_arm1020e_reset
	.word	cpu_arm1020e_do_idle
	.word	cpu_arm1020e_dcache_clean_area
	.word	cpu_arm1020e_switch_mm
	.word	cpu_arm1020e_set_pte
	.size	arm1020e_processor_functions, . - arm1020e_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm1020e_name, #object
cpu_arm1020e_name:
	.ascii	"ARM1020E"
#ifndef CONFIG_CPU_ICACHE_DISABLE
	.ascii	"i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
	.ascii	"d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	.ascii	"(wt)"
#else
	.ascii	"(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
	.ascii	"B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	.ascii	"RR"
#endif
	.ascii	"\0"
	.size	cpu_arm1020e_name, . - cpu_arm1020e_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm1020e_proc_info,#object
__arm1020e_proc_info:
	.long	0x4105a200			@ ARM 1020TE (Architecture v5TE)
	.long	0xff0ffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm1020e_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
	.long	cpu_arm1020e_name
	.long	arm1020e_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm1020e_cache_fns
	.size	__arm1020e_proc_info, . - __arm1020e_proc_info
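
/*
 * Note: the first two words of __arm1020e_proc_info are the CPU ID
 * match value and mask; a main ID register value that equals
 * 0x4105a200 after masking with 0xff0ffff0 (i.e. ignoring the
 * variant and revision fields) selects this entry.
 */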