/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/cache-v7m.S
 *
 * Based on linux/arch/arm/mm/cache-v7.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2005 ARM Ltd.
 *
 * This is the "shell" of the ARMv7M processor support.
 *
 * Unlike ARMv7-A, ARMv7-M performs cache maintenance through
 * memory-mapped System Control Block (SCB) registers rather than
 * CP15 instructions; the macros below wrap those register writes.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/v7m.h>

#include "proc-macros.S"

.arch armv7-m

/* Generic V7M read/write macros for memory mapped cache operations */

/*
 * v7m_cache_read - load the SCB register at offset \reg into \rt.
 * movw/movt materialise the absolute address (no literal pool needed).
 */
.macro v7m_cache_read, rt, reg
	movw	\rt, #:lower16:BASEADDR_V7M_SCB + \reg
	movt	\rt, #:upper16:BASEADDR_V7M_SCB + \reg
	ldr	\rt, [\rt]
.endm

/*
 * v7m_cacheop - store \rt to the SCB cache-operation register at
 * offset \op, triggering the corresponding maintenance operation.
 * \tmp is clobbered (holds the register address).  \c optionally
 * predicates all three instructions (defaults to "al").
 */
.macro v7m_cacheop, rt, tmp, op, c = al
	movw\c	\tmp, #:lower16:BASEADDR_V7M_SCB + \op
	movt\c	\tmp, #:upper16:BASEADDR_V7M_SCB + \op
	str\c	\rt, [\tmp]
.endm


/*
 * read_ccsidr - read the Cache Size ID register describing the
 * cache currently selected via CSSELR.
 */
.macro read_ccsidr, rt
	v7m_cache_read \rt, V7M_SCB_CCSIDR
.endm

/* read_clidr - read the Cache Level ID register (cache topology). */
.macro read_clidr, rt
	v7m_cache_read \rt, V7M_SCB_CLIDR
.endm

/* write_csselr - select which cache level/type CCSIDR will describe. */
.macro write_csselr, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
.endm

/*
 * dcisw: Invalidate data cache by set/way
 */
.macro dcisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
.endm

/*
 * dccisw: Clean and invalidate data cache by set/way
 */
.macro dccisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
.endm

/*
 * dccimvac: Clean and invalidate data cache line by MVA to PoC.
 *
 * One macro is generated per condition code so callers can write
 * e.g. "dccimvacne" and have all three underlying instructions
 * predicated on that condition.
 */
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dccimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
.endm
.endr

/*
 * dcimvac: Invalidate data cache line by MVA to PoC
 * (conditional variants generated per condition code, as for dccimvac)
 */
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dcimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
.endm
.endr

/*
 * dccmvau: Clean data cache line by MVA to PoU
 */
.macro dccmvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
.endm

/*
 * dccmvac: Clean data cache line by MVA to PoC
 */
.macro dccmvac, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
.endm

/*
 * icimvau: Invalidate instruction caches by MVA to PoU
 */
.macro icimvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
.endm

/*
 * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP.
 * rt data ignored by ICIALLU(IS), so can be used for the address
 */
.macro invalidate_icache, rt
	v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
	mov \rt, #0
.endm

/*
 * Invalidate the BTB, inner shareable if SMP.
 * rt data ignored by BPIALL, so it can be used for the address
 */
.macro invalidate_bp, rt
	v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
	mov \rt, #0
.endm

/*
 * v7m_invalidate_l1()
 *
 * Invalidate the level-0 (L1) data/unified cache by set/way without
 * cleaning it first, sizing the walk from CCSIDR.
 *
 * Corrupted registers: r0-r6
 */
ENTRY(v7m_invalidate_l1)
	mov	r0, #0

	write_csselr r0, r1			@ select level 0 data/unified cache
	read_ccsidr r0				@ r0 = CCSIDR for that cache

	movw	r1, #0x7fff
	and	r2, r1, r0, lsr #13		@ NumSets - 1

	movw	r1, #0x3ff

	and	r3, r1, r0, lsr #3		@ NumWays - 1
	add	r2, r2, #1			@ NumSets

	and	r0, r0, #0x7			@ log2(line size in words) - 2
	add	r0, r0, #4			@ SetShift

	clz	r1, r3				@ WayShift
	add	r4, r3, #1			@ NumWays
1:	sub	r2, r2, #1			@ NumSets--
	mov	r3, r4				@ Temp = NumWays
2:	subs	r3, r3, #1			@ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6			@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	dcisw	r5, r6
	bgt	2b				@ next way (flags from the subs above)
	cmp	r2, #0
	bgt	1b				@ next set
	dsb	st				@ complete all maintenance before return
	isb
	ret	lr
ENDPROC(v7m_invalidate_l1)

/*
 * v7m_flush_icache_all()
 *
 * Flush the whole I-cache.
 *
 * Registers:
 *	r0 - set to 0
 */
SYM_TYPED_FUNC_START(v7m_flush_icache_all)
	invalidate_icache r0
	ret	lr
SYM_FUNC_END(v7m_flush_icache_all)

/*
 * v7m_flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Walks every cache level up to the Level of Coherency reported by
 * CLIDR and cleans+invalidates each set/way.
 *
 * Corrupted registers: r0-r7, r9-r11
 */
ENTRY(v7m_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	read_clidr r0
	mov	r3, r0, lsr #23			@ move LoC into position
	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
	beq	finished			@ if loc is 0, then no need to clean
start_flush_levels:
	mov	r10, #0				@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPTION
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	write_csselr r10, r1			@ set current cache level
	isb					@ isb to sych the new cssr&csidr
	read_ccsidr r1				@ read the new csidr
#ifdef CONFIG_PREEMPTION
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
	clz	r5, r4				@ find bit position of way size increment
	movw	r7, #0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
loop1:
	mov	r9, r7				@ create working copy of max index
loop2:
	lsl	r6, r4, r5
	orr	r11, r10, r6			@ factor way and cache number into r11
	lsl	r6, r9, r2
	orr	r11, r11, r6			@ factor index number into r11
	dccisw	r11, r6				@ clean/invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, #1			@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	write_csselr r10, r3			@ select current cache level in cssr
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_flush_dcache_all)

/*
 * v7m_flush_kern_cache_all()
 *
 * Flush the entire cache system.
 *  The data cache flush is now achieved using atomic clean / invalidates
 *  working outwards from L1 cache. This is done using Set/Way based cache
 *  maintenance instructions.
 *  The instruction cache can still be invalidated back to the point of
 *  unification in a single instruction.
 *
 */
SYM_TYPED_FUNC_START(v7m_flush_kern_cache_all)
	stmfd	sp!, {r4-r7, r9-r11, lr}	@ save regs clobbered by dcache_all
	bl	v7m_flush_dcache_all
	invalidate_icache r0
	ldmfd	sp!, {r4-r7, r9-r11, lr}
	ret	lr
SYM_FUNC_END(v7m_flush_kern_cache_all)

/*
 * v7m_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space.
 * No-op on V7M (nothing to do per address space here).
 *
 *	- mm    - mm_struct describing address space
 */
SYM_TYPED_FUNC_START(v7m_flush_user_cache_all)
	ret	lr
SYM_FUNC_END(v7m_flush_user_cache_all)

/*
 * v7m_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 * No-op on V7M.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags - vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache.
 */
SYM_TYPED_FUNC_START(v7m_flush_user_cache_range)
	ret	lr
SYM_FUNC_END(v7m_flush_user_cache_range)

/*
 * v7m_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region.  This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 *	- start  - virtual start address of region
 *	- end    - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
SYM_TYPED_FUNC_START(v7m_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	v7m_coherent_user_range
#endif
SYM_FUNC_END(v7m_coherent_kern_range)

/*
 * v7m_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region.  This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 *	- start  - virtual start address of region
 *	- end    - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
SYM_TYPED_FUNC_START(v7m_coherent_user_range)
 UNWIND(.fnstart		)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3			@ align start down to D-line
1:
/*
 * We use open coded version of dccmvau otherwise USER() would
 * point at movw instruction.
 */
	dccmvau	r12, r3				@ clean D line to PoU
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb	ishst				@ cleans visible before I-side ops
	icache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3			@ align start down to I-line
2:
	icimvau	r12, r3				@ invalidate I line to PoU
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	invalidate_bp r0
	dsb	ishst
	isb
	ret	lr
 UNWIND(.fnend		)
SYM_FUNC_END(v7m_coherent_user_range)

/*
 * v7m_flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
SYM_TYPED_FUNC_START(v7m_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1			@ end = addr + size
	sub	r3, r2, #1
	bic	r0, r0, r3			@ align start down to cache line
1:
	dccimvac r0, r3				@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
SYM_FUNC_END(v7m_flush_kern_dcache_area)

/*
 * v7m_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * Partial lines at an unaligned start/end are cleaned+invalidated
 * (to avoid losing adjacent dirty data); fully-covered lines are
 * simply invalidated.
 *
 *	- start  - virtual start address of region
 *	- end    - virtual end address of region
 */
v7m_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3				@ start unaligned?
	bic	r0, r0, r3
	dccimvacne r0, r3			@ yes: clean+inv the partial line
	addne	r0, r0, r2
	subne	r3, r2, #1			@ restore r3, corrupted by v7m's dccimvac
	tst	r1, r3				@ end unaligned?
	bic	r1, r1, r3
	dccimvacne r1, r3			@ yes: clean+inv the partial line
	cmp	r0, r1
1:
	dcimvaclo r0, r3			@ interior lines: invalidate only
	addlo	r0, r0, r2
	cmplo	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_inv_range)

/*
 * v7m_dma_clean_range(start,end)
 *	- start  - virtual start address of region
 *	- end    - virtual end address of region
 */
v7m_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3			@ align start down to cache line
1:
	dccmvac r0, r3				@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_clean_range)

/*
 *
 * v7m_dma_flush_range(start,end)
 *
 * Clean and invalidate the data cache over the region (both
 * directions of DMA coherency).
 *
 *	- start  - virtual start address of region
 *	- end    - virtual end address of region
 */
SYM_TYPED_FUNC_START(v7m_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3			@ align start down to cache line
1:
	dccimvac r0, r3				@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
SYM_FUNC_END(v7m_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
 *
 * Prepare a region for DMA: clean for TO_DEVICE/BIDIRECTIONAL,
 * invalidate for FROM_DEVICE.
 *
 *	- start - kernel virtual start address
 *	- size  - size of region
 *	- dir   - DMA direction
 */
SYM_TYPED_FUNC_START(v7m_dma_map_area)
	add	r1, r1, r0			@ convert size to end address
	teq	r2, #DMA_FROM_DEVICE
	beq	v7m_dma_inv_range		@ device writes: discard stale lines
	b	v7m_dma_clean_range		@ otherwise push dirty data to memory
SYM_FUNC_END(v7m_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 *
 * Finish a DMA transfer: invalidate unless the transfer was
 * TO_DEVICE only (in which case the CPU's copy is still valid).
 *
 *	- start - kernel virtual start address
 *	- size  - size of region
 *	- dir   - DMA direction
 */
SYM_TYPED_FUNC_START(v7m_dma_unmap_area)
	add	r1, r1, r0			@ convert size to end address
	teq	r2, #DMA_TO_DEVICE
	bne	v7m_dma_inv_range		@ device may have written: invalidate
	ret	lr
SYM_FUNC_END(v7m_dma_unmap_area)