/*
 * linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920
 *
 * Copyright (C) 1999,2000 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * These are the low level assembler for performing cache and TLB
 * functions on the arm920.
 *
 * CONFIG_CPU_ARM920_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line, in bytes.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	65536


	.text
/*
 * cpu_arm920_proc_init()
 *
 * Nothing to do for the ARM920 at processor init time.
 */
ENTRY(cpu_arm920_proc_init)
	mov	pc, lr

/*
 * cpu_arm920_proc_fin()
 *
 * Prepare the processor for shutdown: turn the caches off via the
 * cp15 control register (bit 12 = I-cache; bits 1..3 = W/C/A).
 */
ENTRY(cpu_arm920_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_arm920_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm920_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0				@ jump to reset location

/*
 * cpu_arm920_do_idle()
 *
 * Idle the processor until the next interrupt arrives.
 */
	.align	5
ENTRY(cpu_arm920_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr


#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.  On the ARM920 this is identical to flushing the
 * whole cache, so fall through to flush_kern_cache_all().
 */
ENTRY(arm920_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm920_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	@ Walk every D-cache line by set/way index: the segment number
	@ lives in bits [7:5] and the line index in bits [31:26] of the
	@ index-format cp15 operand.
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 8 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 7 to 0
	tst	r2, #VM_EXEC			@ only touch I cache for exec mappings
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags for address space
 */
ENTRY(arm920_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ big ranges: whole-cache flush is cheaper
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm920_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm920_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(arm920_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm920_dma_inv_range:
	@ Partial lines at either end would lose data if simply
	@ invalidated, so clean them first.
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm920_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm920_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 *
 * Dispatch on direction: TO_DEVICE -> clean, FROM_DEVICE -> invalidate,
 * BIDIRECTIONAL -> clean+invalidate.
 */
ENTRY(arm920_dma_map_area)
	add	r1, r1, r0			@ convert size to end address
	cmp	r2, #DMA_TO_DEVICE
	beq	arm920_dma_clean_range
	bcs	arm920_dma_inv_range		@ dir > DMA_TO_DEVICE: FROM_DEVICE
	b	arm920_dma_flush_range		@ dir < DMA_TO_DEVICE: BIDIRECTIONAL
ENDPROC(arm920_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 *
 * Nothing to do on unmap for this CPU.
 */
ENTRY(arm920_dma_unmap_area)
	mov	pc, lr
ENDPROC(arm920_dma_unmap_area)

/*
 * Table of cache operation function pointers (struct cpu_cache_fns
 * layout); order must match the structure declaration.
 */
ENTRY(arm920_cache_fns)
	.long	arm920_flush_kern_cache_all
	.long	arm920_flush_user_cache_all
	.long	arm920_flush_user_cache_range
	.long	arm920_coherent_kern_range
	.long	arm920_coherent_user_range
	.long	arm920_flush_kern_dcache_area
	.long	arm920_dma_map_area
	.long	arm920_dma_unmap_area
	.long	arm920_dma_flush_range

#endif


/*
 * cpu_arm920_dcache_clean_area(addr, size)
 *
 * Clean (write back) the D-cache lines covering the given region.
 */
ENTRY(cpu_arm920_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable
============================== */

/*
 * cpu_arm920_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip

	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 8 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 7 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mov	pc, lr

/*
 * cpu_arm920_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0				@ NOTE(review): looks like a deliberate nop; purpose not shown here
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	mov	pc, lr

	__INIT

/*
 * __arm920_setup
 *
 * Boot-time CPU setup: invalidate caches/TLBs and compute the
 * control-register value in r0 using the crval masks below.
 */
	.type	__arm920_setup, #function
__arm920_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, arm920_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr
	.size	__arm920_setup, . - __arm920_setup

	/*
	 * R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 0001 ..11 0101
	 *
	 */
	.type	arm920_crval, #object
arm920_crval:
	crval	clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm920_processor_functions, #object
arm920_processor_functions:
	.word	v4t_early_abort
	.word	legacy_pabort
	.word	cpu_arm920_proc_init
	.word	cpu_arm920_proc_fin
	.word	cpu_arm920_reset
	.word	cpu_arm920_do_idle
	.word	cpu_arm920_dcache_clean_area
	.word	cpu_arm920_switch_mm
	.word	cpu_arm920_set_pte_ext
	.size	arm920_processor_functions, . - arm920_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv4t"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v4"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm920_name, #object
cpu_arm920_name:
	.asciz	"ARM920T"
	.size	cpu_arm920_name, . - cpu_arm920_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm920_proc_info,#object
__arm920_proc_info:
	.long	0x41009200			@ CPU id value
	.long	0xff00fff0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm920_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm920_name
	.long	arm920_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	.long	arm920_cache_fns
#else
	.long	v4wt_cache_fns
#endif
	.size	__arm920_proc_info, . - __arm920_proc_info