/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
 *
 * PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
 *
 * Heavily based on proc-arm926.S and proc-xsc3.S
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define CACHE_DLIMIT	32768

/*
 * The cache line size of the L1 D cache.
 */
#define CACHE_DLINESIZE	32

/*
 * cpu_mohawk_proc_init()
 */
SYM_TYPED_FUNC_START(cpu_mohawk_proc_init)
	ret	lr
SYM_FUNC_END(cpu_mohawk_proc_init)

/*
 * cpu_mohawk_proc_fin()
 */
SYM_TYPED_FUNC_START(cpu_mohawk_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...iz...........
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
SYM_FUNC_END(cpu_mohawk_proc_fin)

/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * (same as arm926)
 */
	.align	5
	.pushsection	.idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_mohawk_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x0007			@ .............cam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
SYM_FUNC_END(cpu_mohawk_reset)
	.popsection

/*
 * cpu_mohawk_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_mohawk_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
	ret	lr
SYM_FUNC_END(cpu_mohawk_do_idle)

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(mohawk_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(mohawk_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
SYM_FUNC_ALIAS(mohawk_flush_user_cache_all, mohawk_flush_kern_cache_all)

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(mohawk_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer
	ret	lr
SYM_FUNC_END(mohawk_flush_kern_cache_all)

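/*
 * __flush_whole_cache above also serves as the fall-back path for
 * mohawk_flush_user_cache_range() below when the requested range is
 * larger than CACHE_DLIMIT.
 */
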
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags describing address space
 *
 * (same as arm926)
 */
SYM_TYPED_FUNC_START(mohawk_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(mohawk_flush_user_cache_range)

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
SYM_TYPED_FUNC_START(mohawk_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	mohawk_coherent_user_range
#endif
SYM_FUNC_END(mohawk_coherent_kern_range)

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as arm926)
 */
SYM_TYPED_FUNC_START(mohawk_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
SYM_FUNC_END(mohawk_coherent_user_range)

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
SYM_TYPED_FUNC_START(mohawk_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(mohawk_flush_kern_dcache_area)

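/*
 * The three DMA range operations below are selected by
 * mohawk_dma_map_area() further down according to the DMA direction:
 * clean for DMA_TO_DEVICE, invalidate for DMA_FROM_DEVICE and
 * clean+invalidate for DMA_BIDIRECTIONAL.
 */
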
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
SYM_TYPED_FUNC_START(mohawk_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(mohawk_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
SYM_TYPED_FUNC_START(mohawk_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	mohawk_dma_clean_range
	bcs	mohawk_dma_inv_range
	b	mohawk_dma_flush_range
SYM_FUNC_END(mohawk_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
SYM_TYPED_FUNC_START(mohawk_dma_unmap_area)
	ret	lr
SYM_FUNC_END(mohawk_dma_unmap_area)

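/*
 * cpu_mohawk_dcache_clean_area(addr, size)
 *
 * Clean (write back) the D cache over the given kernel address range.
 *
 * - addr	- kernel start address
 * - size	- size of region in bytes
 */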
SYM_TYPED_FUNC_START(cpu_mohawk_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(cpu_mohawk_dcache_clean_area)

/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_mohawk_switch_mm)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	lr
SYM_FUNC_END(cpu_mohawk_switch_mm)

/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_mohawk_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
#endif
SYM_FUNC_END(cpu_mohawk_set_pte_ext)

.globl	cpu_mohawk_suspend_size
.equ	cpu_mohawk_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
SYM_TYPED_FUNC_START(cpu_mohawk_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
SYM_FUNC_END(cpu_mohawk_do_suspend)

SYM_TYPED_FUNC_START(cpu_mohawk_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
SYM_FUNC_END(cpu_mohawk_do_resume)
#endif

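/*
 * __mohawk_setup
 *
 * Initialise the TLBs, caches and MMU state ready to switch the MMU
 * on.  The clear/set masks for the control register are taken from
 * mohawk_crval below, and the resulting value is returned in r0 for
 * the MMU enable code in head.S to apply.
 */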
	.type	__mohawk_setup, #function
__mohawk_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	adr	r5, mohawk_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr

	.size	__mohawk_setup, . - __mohawk_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..00 0101
	 *
	 */
	.type	mohawk_crval, #object
mohawk_crval:
	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_mohawk_name, "Marvell 88SV331x"

	.align

	.section ".proc.info.init", "a"

	.type	__88sv331x_proc_info,#object
__88sv331x_proc_info:
	.long	0x56158000			@ Marvell 88SV331x (MOHAWK)
	.long	0xfffff000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__mohawk_setup, __88sv331x_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_mohawk_name
	.long	mohawk_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	mohawk_cache_fns
	.size	__88sv331x_proc_info, . - __88sv331x_proc_info