/*
 *  linux/arch/arm/mm/arm925.S: MMU functions for ARM925
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2002 RidgeRun, Inc.
 *  Copyright (C) 2002-2003 MontaVista Software, Inc.
 *
 *  Update for Linux-2.6 and cache flush improvements
 *  Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
 *
 *  hacked for non-paged-MM by Hyok S. Choi, 2004.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * This is the low-level assembler for performing cache and TLB
 * functions on the ARM925.
 *
 *  CONFIG_CPU_ARM925_CPU_IDLE -> nohlt
 *
 *  Some additional notes based on deciphering the TI TRM on OMAP-5910:
 *
 *  NOTE1: The TI925T Configuration Register bit "D-cache clean and flush
 *	   entry mode" must be 0 to flush the entries in both segments
 *	   at once.  This is the default value.  See TRM 2-20 and 2-24 for
 *	   more information.
 *
 *  NOTE2: Default is the "D-cache clean and flush entry mode".  It looks
 *	   like the "Transparent mode" must be on for partial cache flushes
 *	   to work in this mode.  This mode only works with 16-bit external
 *	   memory.  See TRM 2-24 for more information.
 *
 *  NOTE3: Write-back cache flushing seems to be flaky with devices using
 *	   direct memory access, such as USB OHCI.  The workaround is to use
 *	   write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is
 *	   the default for OMAP-1510).
 */

#include <linux/linkage.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	16

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	2

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	256

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	8192
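
/*
 * Sanity check on the geometry above: CACHE_DSEGMENTS * CACHE_DENTRIES *
 * CACHE_DLINESIZE = 2 * 256 * 16 = 8192 bytes, so CACHE_DLIMIT is simply
 * the total D-cache size: past that point a range operation touches at
 * least as many lines as an index-based clean of the whole cache.
 */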

	.text
/*
 * cpu_arm925_proc_init()
 */
ENTRY(cpu_arm925_proc_init)
	mov	pc, lr

/*
 * cpu_arm925_proc_fin()
 */
ENTRY(cpu_arm925_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip
	bl	arm925_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_arm925_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm925_reset)
	/* Send software reset to MPU and DSP */
	mov	ip, #0xff000000
	orr	ip, ip, #0x00fe0000
	orr	ip, ip, #0x0000ce00
	mov	r4, #1
	strh	r4, [ip, #0x10]

	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm925_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	10
ENTRY(cpu_arm925_do_idle)
	mov	r0, #0
	mrc	p15, 0, r1, c1, c0, 0		@ Read control register
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	bic	r2, r1, #1 << 12
	mcr	p15, 0, r2, c1, c0, 0		@ Disable I cache
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mcr	p15, 0, r1, c1, c0, 0		@ Restore ICache enable
	mov	pc, lr

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(arm925_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm925_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm925_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#else
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#endif
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
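
/*
 * Note that the range loop above is unrolled to cover two cache lines
 * (2 * CACHE_DLINESIZE = 32 bytes) per pass.  These entries are normally
 * reached through the arm925_cache_fns table below rather than called
 * directly; flush_user_cache_range(), for example, is handed
 * vma->vm_flags in r2 so that VM_EXEC selects the I-cache maintenance
 * as well.
 */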

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- page aligned address
 */
ENTRY(arm925_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm925_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
#endif
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm925_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
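
/*
 * The dma_{inv,clean,flush}_range entries pair up with the DMA
 * streaming API roughly as follows: invalidate for device-to-memory
 * transfers, clean for memory-to-device transfers, and
 * clean+invalidate (below) for bidirectional buffers.
 */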

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
#else
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

ENTRY(arm925_cache_fns)
	.long	arm925_flush_kern_cache_all
	.long	arm925_flush_user_cache_all
	.long	arm925_flush_user_cache_range
	.long	arm925_coherent_kern_range
	.long	arm925_coherent_user_range
	.long	arm925_flush_kern_dcache_page
	.long	arm925_dma_inv_range
	.long	arm925_dma_clean_range
	.long	arm925_dma_flush_range

ENTRY(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm925_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mov	pc, lr

/*
 * cpu_arm925_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm925_set_pte)
#ifdef CONFIG_MMU
	str	r1, [r0], #-2048		@ linux version

	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL

	tst	r1, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW

	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	eor	r3, r2, #0x0a			@ C & small page?
	tst	r3, #0x0b
	biceq	r2, r2, #4
#endif
	str	r2, [r0]			@ hardware version
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	mov	pc, lr

	__INIT

	.type	__arm925_setup, #function
__arm925_setup:
	mov	r0, #0
#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
	orr	r0, r0, #1 << 7
#endif
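
	/*
	 * r0 is accumulating the TI925T configuration register value
	 * here: bit 7 (set above when CONFIG_CPU_ICACHE_STREAMING_DISABLE
	 * is selected) turns off I-cache streaming, and bit 1 (set just
	 * below) turns on "transparent" mode; see NOTE2 at the top of
	 * this file.
	 */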

	/* Transparent on, D-cache clean & flush mode.  See NOTE2 above */
	orr	r0, r0, #1 << 1			@ transparent mode on
	mcr	p15, 0, r0, c15, c1, 0		@ write TI config register

	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #4				@ disable write-back on caches explicitly
	mcr	p15, 7, r0, c15, c0, 0
#endif

	mrc	p15, 0, r0, c1, c0		@ get control register v4
	ldr	r5, arm925_cr1_clear
	bic	r0, r0, r5
	ldr	r5, arm925_cr1_set
	orr	r0, r0, r5
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .1.. .... .... ....
#endif
	mov	pc, lr
	.size	__arm925_setup, . - __arm925_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 0001 ..11 1101
	 *
	 */
	.type	arm925_cr1_clear, #object
	.type	arm925_cr1_set, #object
arm925_cr1_clear:
	.word	0x7f3f
arm925_cr1_set:
	.word	0x313d

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm925_processor_functions, #object
arm925_processor_functions:
	.word	v4t_early_abort
	.word	cpu_arm925_proc_init
	.word	cpu_arm925_proc_fin
	.word	cpu_arm925_reset
	.word	cpu_arm925_do_idle
	.word	cpu_arm925_dcache_clean_area
	.word	cpu_arm925_switch_mm
	.word	cpu_arm925_set_pte
	.size	arm925_processor_functions, . - arm925_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv4t"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v4"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm925_name, #object
cpu_arm925_name:
	.ascii	"ARM925T"
#ifndef CONFIG_CPU_ICACHE_DISABLE
	.ascii	"i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
	.ascii	"d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	.ascii	"(wt)"
#else
	.ascii	"(wb)"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	.ascii	"RR"
#endif
#endif
	.ascii	"\0"
	.size	cpu_arm925_name, . - cpu_arm925_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm925_proc_info,#object
__arm925_proc_info:
	.long	0x54029250
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm925_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm925_name
	.long	arm925_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm925_cache_fns
	.size	__arm925_proc_info, . - __arm925_proc_info

	.type	__arm915_proc_info,#object
__arm915_proc_info:
	.long	0x54029150
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm925_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm925_name
	.long	arm925_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm925_cache_fns
	.size	__arm915_proc_info, . - __arm915_proc_info
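
/*
 * For reference, the boot-time lookup masks the CPU ID read from
 * CP15 c0 with the second word of each entry above and compares the
 * result with the first word, so __arm925_proc_info matches any
 * 0x5402925x part, e.g. (0x54029251 & 0xfffffff0) == 0x54029250
 * (the revision nibble is ignored).
 */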