/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920
 *
 * Copyright (C) 1999,2000 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * These are the low level assembler for performing cache and TLB
 * functions on the arm920.
 *
 * CONFIG_CPU_ARM920_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line, in bytes.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	65536


	.text
/*
 * cpu_arm920_proc_init()
 *
 * Per-CPU initialisation: nothing to do on the ARM920.
 */
SYM_TYPED_FUNC_START(cpu_arm920_proc_init)
	ret	lr
SYM_FUNC_END(cpu_arm920_proc_init)

/*
 * cpu_arm920_proc_fin()
 *
 * Shut the processor down for reset/power-off: clear the cache and
 * write-buffer enable bits in the CP15 control register.
 */
SYM_TYPED_FUNC_START(cpu_arm920_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............ (I-cache enable)
	bic	r0, r0, #0x000e			@ ............wca. (WB/cache/align)
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
SYM_FUNC_END(cpu_arm920_proc_fin)

/*
 * cpu_arm920_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Placed in .idmap.text so the code is identity-mapped while the
 * MMU is being switched off underneath it.
 */
	.align	5
	.pushsection	.idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_arm920_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam (MMU/align/cache/WB off)
	bic	ip, ip, #0x1100			@ ...i...s........ (I-cache/system off)
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0				@ jump to 'loc' (first argument)
SYM_FUNC_END(cpu_arm920_reset)
	.popsection

/*
 * cpu_arm920_do_idle()
 *
 * Idle the CPU using the CP15 wait-for-interrupt operation.
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm920_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
SYM_FUNC_END(cpu_arm920_do_idle)

#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(arm920_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(arm920_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 *
 * Simply an alias of flush_kern_cache_all() on this CPU.
 */
SYM_FUNC_ALIAS(arm920_flush_user_cache_all, arm920_flush_kern_cache_all)

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(arm920_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	@ Walk the whole D-cache by set/way-style index operations:
	@ r1 holds the segment field, r3 = segment | entry index.
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 8 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 7 to 0
	tst	r2, #VM_EXEC			@ only touch I-side for exec mappings
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm920_flush_kern_cache_all)

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags for address space
 */
SYM_TYPED_FUNC_START(arm920_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache		@ big range: whole-cache op is cheaper

1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm920_flush_user_cache_range)

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
SYM_TYPED_FUNC_START(arm920_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	arm920_coherent_user_range
#endif
SYM_FUNC_END(arm920_coherent_kern_range)

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * Returns 0 in r0.
 */
SYM_TYPED_FUNC_START(arm920_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0				@ success
	ret	lr
SYM_FUNC_END(arm920_coherent_user_range)

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
SYM_TYPED_FUNC_START(arm920_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm920_flush_kern_dcache_area)

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
arm920_dma_inv_range:
	@ A partially-covered line at either boundary may hold live
	@ dirty data, so it is cleaned before the invalidate loop.
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
arm920_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
SYM_TYPED_FUNC_START(arm920_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm920_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 *
 * Dispatch on dir: equal to DMA_TO_DEVICE -> clean only;
 * above it (unsigned) -> invalidate only; below -> clean+invalidate.
 */
SYM_TYPED_FUNC_START(arm920_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	cmp	r2, #DMA_TO_DEVICE
	beq	arm920_dma_clean_range
	bcs	arm920_dma_inv_range
	b	arm920_dma_flush_range
SYM_FUNC_END(arm920_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 *
 * No cache maintenance is required on unmap for this CPU.
 */
SYM_TYPED_FUNC_START(arm920_dma_unmap_area)
	ret	lr
SYM_FUNC_END(arm920_dma_unmap_area)

#endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */


/*
 * cpu_arm920_dcache_clean_area(addr, size)
 *
 * Clean (write back) the D-cache lines covering 'size' bytes at
 * kernel address 'addr'.
 */
SYM_TYPED_FUNC_START(cpu_arm920_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	ret	lr
SYM_FUNC_END(cpu_arm920_dcache_clean_area)

/* =============================== PageTable ============================== */

/*
 * cpu_arm920_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip

	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 8 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 7 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr
SYM_FUNC_END(cpu_arm920_switch_mm)

/*
 * cpu_arm920_set_pte(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0		@ NOTE(review): looks like a deliberate nop after the macro — confirm
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr
SYM_FUNC_END(cpu_arm920_set_pte_ext)

/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
/* Three 32-bit words are saved: PID, domain access control, CP15 control. */
.globl	cpu_arm920_suspend_size
.equ	cpu_arm920_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
/*
 * cpu_arm920_do_suspend(save_area)
 *
 * Store the CP15 state that must survive suspend into the buffer
 * at r0 (cpu_arm920_suspend_size bytes).
 */
SYM_TYPED_FUNC_START(cpu_arm920_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
SYM_FUNC_END(cpu_arm920_do_suspend)

/*
 * cpu_arm920_do_resume(save_area, ttb)
 *
 * Restore the state saved by cpu_arm920_do_suspend, reload the
 * translation table base from r1, then tail-call cpu_resume_mmu
 * with the saved control register value in r0.
 */
SYM_TYPED_FUNC_START(cpu_arm920_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia	r0, {r4 - r6}
	mcr	p15, 0, r4, c13, c0, 0	@ PID
	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
SYM_FUNC_END(cpu_arm920_do_resume)
#endif

/*
 * __arm920_setup
 *
 * Boot-time setup: invalidate caches/TLBs and compute the value to
 * be written to the CP15 control register (returned in r0), built
 * from the current value masked/set by arm920_crval.
 */
	.type	__arm920_setup, #function
__arm920_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, arm920_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__arm920_setup, . - __arm920_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 0001 ..11 0101
	 *
	 */
	.type	arm920_crval, #object
arm920_crval:
	crval	clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130

	__INITDATA
	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm920_name, "ARM920T"

	.align

	.section ".proc.info.init", "a"

	.type	__arm920_proc_info,#object
__arm920_proc_info:
	.long	0x41009200			@ CPU id value
	.long	0xff00fff0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__arm920_setup, __arm920_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm920_name
	.long	arm920_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	.long	arm920_cache_fns
#else
	.long	v4wt_cache_fns
#endif
	.size	__arm920_proc_info, . - __arm920_proc_info