/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 0;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->line_len)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

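	/*
	 * Illustrative output (values depend on the actual hardware build):
	 *
	 *	I-Cache		: 32K, 4way/set, 64B Line, VIPT
	 *	D-Cache		: 64K, 2way/set, 64B Line, PIPT
	 */
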
	p = &cpuinfo_arc700[c].slc;
	if (p->line_len)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));

	return buf;
}

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here: the BCRs are simply read and converted.
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}
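	/* e.g. (illustrative): slc_cfg.sz == 3 decodes to 128K << 3 == 1 MB */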

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c)
		ioc_exists = 1;
	else
		ioc_enable = 0;

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
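	/*
	 * e.g. (illustrative): ibcr.sz == 7 decodes to 1 << 6 == 64K; with
	 * 2 ways and 8K pages the way-size is 32K > PAGE_SIZE, so ->alias
	 * above comes out true (64/2/8 == 4 cache colors).
	 */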

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
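
/*
 * Note: OP_FLUSH_N_INV is deliberately OP_INV | OP_FLUSH so that the
 * "op & OP_INV" and "op & OP_FLUSH" tests below match the combined
 * operation as well as the standalone ones.
 */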

/*
 *		I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact, for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x depending on cache-geometry, 13 coming from the
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future-safe range, and more so, these 5
 * bits of vaddr could easily be "stuffed" in the paddr as bits [4:0] since
 * the orig 5 bits of paddr were anyway ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of this "clumsy"
 * interface for the additional info was that no new reg was needed in the
 * CDU programming model.
 *
 * 17:13 represented the max num of bits passable; the actual bits needed
 * were fewer, based on the num-of-aliases possible:
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This version of the MMU supports variable page sizes (1k-16k), although
 * Linux will only support 8k (default), 16k and 4k.
 * However from the hardware perspective, smaller page sizes aggravate
 * aliasing, meaning more vaddr bits are needed to disambiguate the
 * cache-line op; the existing scheme of piggybacking won't work for certain
 * configurations. Two new registers IC_PTAG and DC_PTAG were introduced:
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
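
/*
 * Worked example (illustrative): a 64K, 2-way VIPT I-cache with 8K pages
 * has a 32K way-size, i.e. 4 possible aliases, needing vaddr bits [14:13].
 * Under the v1/v2 "stuffing" scheme those bits ride in the low bits of
 * paddr:
 *
 *	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 *
 * which is exactly what __cache_line_loop_v2() below does.
 */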

static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
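	/*
	 * full_page folds to a compile-time constant only when the caller
	 * passed a literal PAGE_SIZE (e.g. __flush_dcache_page() via
	 * __dc_line_op()), letting the compiler elide the alignment fixup.
	 */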

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
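	/*
	 * e.g. (illustrative): paddr = 0x1234, sz = 0x40, 64B lines:
	 * sz becomes 0x40 + 0x34 = 0x74, paddr is floored to 0x1200, and
	 * num_lines = DIV_ROUND_UP(0x74, 64) = 2, covering 0x1200-0x127f.
	 */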

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

/*
 * For ARC700 MMUv3 I-cache and D-cache flushes
 * Also reused for HS38 aliasing I-cache configuration
 */
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3: cache ops require paddr in the PTAG reg.
	 * If V->P is constant across the loop (full page), PTAG can be
	 * written once, outside the loop.
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model.
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
 * Here's how cache ops are implemented
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher
 * bits need to be written into PTAG_HI
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (cacheop == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * Otherwise a concurrent operation can be silently ignored and/or the
	 * old/new operation can remain incomplete forever (lockup in the
	 * SLC_CTRL_BUSY loop below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be set up before START (the latter triggers the
	 * operation). END can't be same as START, so add (l2_line_sz - 1)
	 * to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
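
	/*
	 * e.g. (illustrative): paddr = 0x8000_0040, sz = 0x20, 64B SLC line:
	 * END is programmed to 0x8000_009f and START to 0x8000_0040,
	 * guaranteeing END != START even though the region fits in one line.
	 */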

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);
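
/*
 * Illustrative driver-side usage (hypothetical buffer names; drivers
 * typically reach these via the arch dma_map_*() streaming API):
 *
 *	dma_cache_wback(buf_paddr, len);   CPU filled buf, device will read
 *	dma_cache_inv(buf_paddr, len);     device wrote buf, CPU will read
 */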

/*
 * This is the API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address, thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2-page scenario, where the range
	 *     straddles 2 virtual pages, hence the need for a loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since
 *    a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
	__flush_dcache_page((phys_addr_t)page_address(page),
			    (unsigned long)page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with the K-mapping, sync the former to the physical
	 * page so that the K-mapping in the memcpy below sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
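
/*
 * Illustrative user-space invocation by a JIT (assuming the arch exports
 * __NR_cacheflush; the args are currently ignored in favor of a full flush):
 *
 *	syscall(__NR_cacheflush, (uint32_t)code_buf, code_sz, 0);
 */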

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Only the master CPU needs to execute the rest of this function:
	 *  - Assume SMP, so all cores will have the same cache config, hence
	 *    any geometry checks will be the same for all
	 *  - IOC setup / dma callbacks only need to be set up once
	 */
	if (cpu)
		return;

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {

		/* IM set : flush before invalidate */
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);

		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

		/* Important to wait for flush to complete */
		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
	}

	if (is_isa_arcv2() && ioc_enable) {
		/* IO coherency base - 0x8z */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
		/* Enable partial writes */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		/* Enable IO coherency */
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}