/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * TLB Exception Handling for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * Vineetg: April 2011 :
 *  -MMU v1: moved out legacy code into a separate file
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * Vineetg: July 2009
 *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
 *   entry, so that it doesn't knock out its I-TLB entry
 *  -Some more fine tuning:
 *   bmsk instead of add, asl.cc instead of branch, delay slot utilisation etc
 *
 * Vineetg: July 2009
 *  -Practically rewrote the I/D TLB Miss handlers
 *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
 *   Hence leaner by 1.5K
 *   Used Conditional arithmetic to replace excessive branching
 *   Also used short instructions wherever possible
 *
 * Vineetg: Aug 13th 2008
 *  -Passing ECR (Exception Cause REG) to do_page_fault() for printing
 *   more information in case of a Fatality
 *
 * Vineetg: March 25th Bug #92690
 *  -Added Debug Code to check if sw-ASID == hw-ASID
 *
 * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/entry.h>
#include <asm/mmu.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>

#ifdef CONFIG_ISA_ARCOMPACT
;-----------------------------------------------------------------
; ARC700 Exception Handling doesn't auto-switch stack and it only provides
; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
;
; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
; "global" is used to free up the FIRST core reg to be able to code the rest
; of the exception prologue (IRQs are auto-disabled on Exceptions, so this is
; IRQ-safe).
; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
; need to be saved as well by extending the "global" to be 4 words. Hence
;	".size   ex_saved_reg1, 16"
; [All of this dance is to avoid stack switching for each TLB Miss, since we
; need to save only a handful of regs, as opposed to the complete reg file]
;
; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
; core reg as that would not be SMP safe.
; Thus the scratch AUX reg is used (and no longer used to cache task PGD).
; To save the remaining 3 regs per cpu, the global is made "per-cpu".
; The epilogue thus has to locate the "per-cpu" storage for the regs.
; To avoid cache line bouncing, the per-cpu global is aligned/sized per
; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
;	".size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
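;
; Illustration (a sketch, assuming L1_CACHE_SHIFT == 6, i.e. 64 byte lines):
;   SMP: cpu N's slot starts at @ex_saved_reg1 + (N << 6); r1/r2/r3 are saved
;        at offsets 4/8/12 within that slot, r0 lives in ARC_REG_SCRATCH_DATA0
;   UP : r0/r1/r2/r3 are saved at @ex_saved_reg1 + 0/4/8/12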

; As simple as that....
;--------------------------------------------------------------------------

; scratch memory to save [r0-r3] used to code TLB refill Handler
ARCFP_DATA ex_saved_reg1
	.align 1 << L1_CACHE_SHIFT
	.type   ex_saved_reg1, @object
#ifdef CONFIG_SMP
	.size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
ex_saved_reg1:
	.zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
#else
	.size   ex_saved_reg1, 16
ex_saved_reg1:
	.zero 16
#endif

.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_SMP
	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
	GET_CPU_ID  r0			; get to per cpu scratch mem,
	asl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
	add r0, @ex_saved_reg1, r0
#else
	st    r0, [@ex_saved_reg1]
	mov_s r0, @ex_saved_reg1
#endif
	st_s  r1, [r0, 4]
	st_s  r2, [r0, 8]
	st_s  r3, [r0, 12]
.endm

.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
	GET_CPU_ID  r0			; get to per cpu scratch mem
	asl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
	add r0, @ex_saved_reg1, r0
	ld_s  r3, [r0,12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	lr    r0, [ARC_REG_SCRATCH_DATA0]
#else
	mov_s r0, @ex_saved_reg1
	ld_s  r3, [r0,12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	ld_s  r0, [r0]
#endif
.endm

#else	/* ARCv2 */

.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_ARC_HAS_LL64
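	; Note on the LL64 variant below: r0-r3 are parked just below sp
	; without moving sp. A sketch of the reasoning (an assumption, not
	; stated in the original): interrupts stay disabled for the duration
	; of the fast-path miss handler and nothing else touches the stack
	; before the matching TLBMISS_RESTORE_REGS reloads the same offsets.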
	std   r0, [sp, -16]
	std   r2, [sp, -8]
#else
	PUSH  r0
	PUSH  r1
	PUSH  r2
	PUSH  r3
#endif
.endm

.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_ARC_HAS_LL64
	ldd   r0, [sp, -16]
	ldd   r2, [sp, -8]
#else
	POP   r3
	POP   r2
	POP   r1
	POP   r0
#endif
.endm

#endif

;============================================================================
; TLB Miss handling Code
;============================================================================

#ifndef PMD_SHIFT
#define PMD_SHIFT PUD_SHIFT
#endif

#ifndef PUD_SHIFT
#define PUD_SHIFT PGDIR_SHIFT
#endif

;-----------------------------------------------------------------------------
; This macro does the page-table lookup for the faulting address.
; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
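;
; A rough C-like sketch of the walk below (illustration only: 2-level case,
; ignoring THP and PAE40; the names are for exposition, not actual helpers):
;
;	pgd_entry = pgd_base[vaddr >> PGDIR_SHIFT];
;	if (!pgd_entry)
;		goto do_slow_path_pf;
;	pte_ptr = (pgd_entry & PAGE_MASK) +
;		  ((vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) * sizeof(pte);
;	pte = *pte_ptr;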
.macro LOAD_FAULT_PTE

	lr  r2, [efa]

#ifdef CONFIG_ISA_ARCV2
	lr  r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
#else
	GET_CURR_TASK_ON_CPU  r1
	ld  r1, [r1, TASK_ACT_MM]
	ld  r1, [r1, MM_PGD]
#endif

	lsr     r0, r2, PGDIR_SHIFT     ; Bits for indexing into PGD
	ld.as   r3, [r1, r0]            ; PGD entry corresp to faulting addr
	tst	r3, r3
	bz	do_slow_path_pf         ; if no Page Table, do page fault

#if CONFIG_PGTABLE_LEVELS > 3
	lsr     r0, r2, PUD_SHIFT	; Bits for indexing into PUD
	and	r0, r0, (PTRS_PER_PUD - 1)
	ld.as	r1, [r3, r0]		; PUD entry
	tst	r1, r1
	bz	do_slow_path_pf
	mov	r3, r1
#endif

#if CONFIG_PGTABLE_LEVELS > 2
	lsr     r0, r2, PMD_SHIFT	; Bits for indexing into PMD
	and	r0, r0, (PTRS_PER_PMD - 1)
	ld.as	r1, [r3, r0]		; PMD entry
	tst	r1, r1
	bz	do_slow_path_pf
	mov	r3, r1
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	and.f	0, r3, _PAGE_HW_SZ	; Is this a Huge PMD (thp)
	add2.nz	r1, r1, r0
	bnz.d	2f		; YES: PGD == PMD has THP PTE: stop pgd walk
	mov.nz	r0, r3

#endif
	and	r1, r3, PAGE_MASK

	; Get the PTE entry: The idea is
	; (1) x = addr >> PAGE_SHIFT 	-> masks page-off bits from @fault-addr
	; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
	; (3) z = (pgtbl + y * sizeof(pte))	-> 4 byte PTEs, 8 with PAE40
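	;
	; Worked example (a sketch, assuming 8K pages i.e. PAGE_SHIFT == 13 and
	; 4-byte PTEs i.e. PTE_SIZE_LOG == 2): steps (1)-(3) collapse into one
	; byte offset,
	;	off = (vaddr >> (13 - 2)) & ((PTRS_PER_PTE - 1) << 2)
	; which feeds the scaled "ld.aw" below directly, avoiding a multiply.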

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_SIZE_LOG	3	/* 8 == 2 ^ 3 */
#else
#define PTE_SIZE_LOG	2	/* 4 == 2 ^ 2 */
#endif

	; multiply in step (3) above is avoided by shifting less in step (1)
	lsr     r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
	and     r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
	ld.aw   r0, [r1, r0]            ; r0: PTE (lower word only for PAE40)
					; r1: PTE ptr

2:

.endm

;-----------------------------------------------------------------
; Convert Linux PTE entry into TLB entry
; A one-word PTE entry is programmed as a two-word TLB Entry [PD0:PD1] in mmu
;    (for PAE40: a two-word PTE, and a three-word TLB Entry [PD0:PD1:PD1HI])
; IN: r0 = PTE, r1 = ptr to PTE
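;
; Illustration of the permission shuffle below (a sketch, not exhaustive):
;   non-GLOBAL (user) PTE, r w x = 1 1 0      -> PD1 perms Kr Kw Kx Ur Uw Ux = 110 110
;   GLOBAL (kernel/vmalloc) PTE, r w x = 1 1 0 -> 110 000 (no user access)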

.macro CONV_PTE_TO_TLB
	and    r3, r0, PTE_BITS_RWX	;          r  w  x
	asl    r2, r3, 3		; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
	and.f  0,  r0, _PAGE_GLOBAL
	or.z   r2, r2, r3		; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)

	and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
	or  r3, r3, r2

	sr  r3, [ARC_REG_TLBPD1]    	; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
#ifdef	CONFIG_ARC_HAS_PAE40
	ld	r3, [r1, 4]		; paddr[39..32]
	sr	r3, [ARC_REG_TLBPD1HI]
#endif

	and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb

	lr  r3,[ARC_REG_TLBPD0]     ; MMU prepares PD0 with vaddr and asid

	or  r3, r3, r2              ; S | vaddr | {sasid|asid}
	sr  r3,[ARC_REG_TLBPD0]     ; rewrite PD0
.endm

;-----------------------------------------------------------------
; Commit the TLB entry into MMU
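; (On MMU v3 this is a two-step sequence: TLBGetIndex picks a victim slot and
;  TLBWriteNI then writes PD0/PD1 into it; newer MMUs collapse both steps into
;  the single TLBInsertEntry command used in the #else branch below.)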
.macro COMMIT_ENTRY_TO_MMU
#ifdef CONFIG_ARC_MMU_V3

	/* Get free TLB slot: Set = computed from vaddr, way = random */
	sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]

	/* Commit the Write */
	sr TLBWriteNI, [ARC_REG_TLBCOMMAND]

#else
	sr TLBInsertEntry, [ARC_REG_TLBCOMMAND]
#endif

88:
.endm


ARCFP_CODE	;Fast Path Code, candidate for ICCM

;-----------------------------------------------------------------------------
; I-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissI)

	TLBMISS_FREEUP_REGS

	;----------------------------------------------------------------
	; Get the PTE corresponding to the V-addr accessed, r2 is set up with EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Check if PTE permissions are appropriate for executing code
	cmp_s   r2, VMALLOC_START
	mov_s   r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
	or.hs   r2, r2, _PAGE_GLOBAL

	and     r3, r0, r2  ; Mask out NON Flag bits from PTE
	xor.f   r3, r3, r2  ; check ( ( pte & flags_test ) == flags_test )
	bnz     do_slow_path_pf

	; Let Linux VM know that the page was accessed
	or      r0, r0, _PAGE_ACCESSED  ; set Accessed Bit
	st_s    r0, [r1]                ; Write back PTE

	CONV_PTE_TO_TLB
	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
EV_TLBMissI_fast_ret:	; additional label for VDK OS-kit instrumentation
	rtie

END(EV_TLBMissI)

;-----------------------------------------------------------------------------
; D-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissD)

	TLBMISS_FREEUP_REGS

	;----------------------------------------------------------------
	; Get the PTE corresponding to the V-addr accessed
	; If the PTE exists, it sets up r0 = PTE, r1 = ptr to PTE, r2 = EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Check if PTE permissions are appropriate for data access (R/W/R+W)

	cmp_s	r2, VMALLOC_START
	mov_s   r2, _PAGE_PRESENT	; common bit for K/U PTE
	or.hs	r2, r2, _PAGE_GLOBAL	; kernel PTE only

	; Linux PTE [RWX] bits are semantically overloaded:
	; -If PAGE_GLOBAL is set, they refer to kernel-only flags (vmalloc)
	; -Otherwise they are user-mode permissions, and those are exactly the
	;  same for kernel mode as well (e.g. copy_(to|from)_user)
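	;
	; e.g. a load miss on a vmalloc address must find
	;      _PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READ in the PTE, whereas the
	;      same miss on a user address only needs _PAGE_PRESENT | _PAGE_READ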

	lr      r3, [ecr]
	btst_s  r3, ECR_C_BIT_DTLB_LD_MISS	; Read Access
	or.nz   r2, r2, _PAGE_READ      	; chk for Read flag in PTE
	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; Write Access
	or.nz   r2, r2, _PAGE_WRITE     	; chk for Write flag in PTE
	; The laddering above takes care of XCHG accesses (both R and W)

	; By now, r2 is set up with all the Flags we need to check in the PTE
	and     r3, r0, r2              ; Mask out NON Flag bits from PTE
	brne.d  r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test)

	;----------------------------------------------------------------
	; UPDATE_PTE: Let Linux VM know that the page was accessed/dirtied
	or      r0, r0, _PAGE_ACCESSED        ; Accessed bit always
	or.nz   r0, r0, _PAGE_DIRTY           ; if Write, set Dirty bit as well
	st_s    r0, [r1]                      ; Write back PTE

	CONV_PTE_TO_TLB

	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
EV_TLBMissD_fast_ret:	; additional label for VDK OS-kit instrumentation
	rtie

;-------- Common routine to call Linux Page Fault Handler -----------
do_slow_path_pf:

#ifdef CONFIG_ISA_ARCV2
	; Set the Z flag if the exception was taken in U mode. Hardware
	; micro-ops do this on any taken interrupt/exception, so it was already
	; the case at the entry above, but the ensuing code has clobbered it.
	; EXCEPTION_PROLOGUE, called in the slow path, relies on the Z flag
	; being set correctly.

	lr	r2, [erstatus]
	and	r2, r2, STATUS_U_MASK
	bxor.f	0, r2, STATUS_U_BIT
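	; (bxor.f flips the U bit in r2 and only updates the flags: the result
	;  is 0, i.e. Z is set, exactly when the exception came from user mode)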
#endif

	; Restore the 4 scratch regs saved by the fast path miss handler
	TLBMISS_RESTORE_REGS

	; Slow path TLB Miss handled as a regular ARC Exception
	; (stack switching / save the complete reg-file).
	b  call_do_page_fault
END(EV_TLBMissD)