/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 0;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));

	return buf;
}

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCRs.
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;


	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c)
		ioc_exists = 1;
	else
		ioc_enable = 0;

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}
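
/*
 * Worked example (illustrative note, not part of the original comments):
 * the @alias flag computed above just says "way size exceeds page size".
 * e.g. a 32K, 2-way VIPT I-cache with 8K pages has a 16K way, so
 * sz_k/assoc/TO_KB(PAGE_SIZE) = 32/2/8 = 2 > 1 and the cache can alias,
 * whereas a 16K, 2-way cache gives 16/2/8 = 1 and cannot.
 */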

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was no new reg was needed in CDU programming
 * model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */

static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

/*
 * For ARC700 MMUv3 I-cache and D-cache flushes
 * Also reused for HS38 aliasing I-cache configuration
 */
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
 * Here's how cache ops are implemented
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher
 * bits need to be written into PTAG_HI
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (cacheop == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence the need for a loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 *    use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);

}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));

}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * the K-mapping in the memcpy below sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}


/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Only master CPU needs to execute rest of function:
	 *  - Assume SMP so all cores will have same cache config so
	 *    any geometry checks will be same for all
	 *  - IOC setup / dma callbacks only need to be setup once
	 */
	if (cpu)
		return;

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}

	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {

		/* IM set : flush before invalidate */
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);

		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

		/* Important to wait for flush to complete */
		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
	}

	if (is_isa_arcv2() && ioc_enable) {
		/* IO coherency base - 0x8z */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
		/* Enable partial writes */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		/* Enable IO coherency */
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}
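
/*
 * Usage sketch (illustrative only; @paddr and @len are placeholder names,
 * not taken from the code above): callers such as the arch DMA mapping code
 * are expected to pick the exported routine per transfer direction, roughly:
 *
 *	dma_cache_wback(paddr, len);	  - CPU wrote buffer, device will read
 *	dma_cache_inv(paddr, len);	  - device wrote buffer, CPU will read
 *	dma_cache_wback_inv(paddr, len);  - bidirectional transfer
 *
 * With IOC enabled these are wired to the *_ioc variants above and become
 * no-ops, since the IOC hardware snoops DMA traffic.
 */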