xref: /linux/arch/arc/mm/cache.c (revision 82fea5a1bbbe8c3b56d5f3efbf8880c7b25b1758)
/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			"SLC\t\t: %uK, %uB Line\n", p->sz_k, p->line_len);

	return buf;
}
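
/*
 * Example of the resulting boot-log lines (values illustrative only,
 * assuming an ARC700 with 8K pages):
 *
 *	I-Cache		: 16K, 2way/set, 64B Line, VIPT
 *	D-Cache		: 64K, 4way/set, 64B Line, VIPT aliasing
 */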

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here; simply read/convert the BCRs.
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	p_slc = &cpuinfo_arc700[cpu].slc;
	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}
}
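
/*
 * Worked example of the decode above (BCR field values hypothetical):
 * IC_BCR = { ver:3, config:3, sz:6, line_len:3 } yields
 *	assoc    = 2			(fixed 2-way for ver <= 3)
 *	line_len = 8 << 3       = 64 bytes
 *	sz_k     = 1 << (6 - 1) = 32 KB
 *	alias    = 32/2/8 > 1		(16K way-size > 8K page => aliasing)
 */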

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 *		I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact, for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future-safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of using this
 * "clumsy" interface for the additional info was that no new reg was needed
 * in the CDU programming model.
 *
 * [17:13] represented the max num of bits passable; the actual bits needed
 * were fewer, based on the num of aliases possible:
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k), although Linux will
 * only support 8k (default), 16k and 4k.
 * However from the hardware perspective, smaller page sizes aggravate
 * aliasing, meaning more vaddr bits are needed to disambiguate the
 * cache-line-op; the existing scheme of piggybacking won't work for certain
 * configurations. Two new registers IC_PTAG and DC_PTAG were introduced:
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
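
/*
 * Minimal sketch of the vaddr "stuffing" described above (illustration only,
 * not compiled; the helper name is ours, not a kernel API). With 8K pages,
 * PAGE_SHIFT is 13, so (vaddr >> PAGE_SHIFT) & 0x1F extracts vaddr[17:13]
 * and plants it in paddr[4:0] - the line-offset bits which CDU line ops
 * ignore anyway.
 */
#if 0
static unsigned long example_stuff_vaddr_bits(unsigned long paddr,
					      unsigned long vaddr)
{
	return (paddr & ~0x1FUL) | ((vaddr >> PAGE_SHIFT) & 0x1F);
}
#endif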

static inline
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
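
/*
 * Worked example of the floor/ceil fixup above (64B lines, addresses
 * hypothetical):
 *	paddr = 0x8000_1013, sz = 8
 *	sz    += 0x13 (line offset of paddr)	-> 0x1b
 *	paddr &= CACHE_LINE_MASK		-> 0x8000_1000
 *	num_lines = DIV_ROUND_UP(0x1b, 64)	-> 1
 */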

static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

/*
 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
 * maintenance ops (in IVIL reg), as long as icache doesn't alias.
 *
 * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3)
 */
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}
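
/*
 * Worked example of the constant propagation noted above: a call such as
 * __dc_entire_op(OP_FLUSH) reduces at compile time to a single
 * write_aux_reg(ARC_REG_DC_FLSH, 0x1) plus the flush-status poll, since
 * both IM-bit toggle paths are statically dead for that @op.
 */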

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
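
/*
 * Sketch of the congruency test used above (an illustration under our own
 * assumptions, not the kernel's actual addr_not_cache_congruent() helper):
 * in an aliasing VIPT cache two mappings of a page collide iff they differ
 * in the vaddr bits above PAGE_SHIFT that still index the cache (the page
 * "colour"). colour_mask below is a hypothetical parameter.
 */
#if 0
static int example_not_congruent(unsigned long addr1, unsigned long addr2,
				 unsigned long colour_mask)
{
	return ((addr1 ^ addr2) & colour_mask) != 0;
}
#endif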

void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is the API for making the I/D caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address, thus we
 * need to convert vmalloc addr to PHY addr.
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle the case where the range straddles two
	 *     virtual pages, hence the loop.
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
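
/*
 * Typical usage sketch (names hypothetical): after patching kernel text,
 * e.g. for a loadable module or a breakpoint:
 *
 *	memcpy(dst, new_insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */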

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since
 *    a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to eliminate alignment checks in the flush loop at compile time */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((unsigned long)page_address(page), u_vaddr);
	__flush_dcache_page((unsigned long)page_address(page),
			    (unsigned long)page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long u_vaddr, struct vm_area_struct *vma)
{
	unsigned long kfrom = (unsigned long)page_address(from);
	unsigned long kto = (unsigned long)page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * the K-mapping in the memcpy below sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page((void *)kto, (void *)kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available),
	 * update_mmu_cache() already has code to do that for other
	 * non-copied user pages (e.g. read faults which wire in pagecache
	 * page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}


/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
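
/*
 * Userspace usage sketch for a JIT (illustrative; assumes the arch exports
 * __NR_cacheflush, and note @flags is currently ignored by the
 * implementation above):
 *
 *	syscall(__NR_cacheflush, (uint32_t)code_buf, code_sz, 0);
 */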

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}
}
784