/*
 * TLB Exception Handling for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: April 2011 :
 *  -MMU v1: moved out legacy code into a separate file
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * Vineetg: July 2009
 *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
 *   entry, so that it doesn't knock out its I-TLB entry
 *  -Some more fine tuning:
 *   bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
 *
 * Vineetg: July 2009
 *  -Practically rewrote the I/D TLB Miss handlers
 *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
 *   Hence Leaner by 1.5 K
 *   Used Conditional arithmetic to replace excessive branching
 *   Also used short instructions wherever possible
 *
 * Vineetg: Aug 13th 2008
 *  -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
 *   more information in case of a Fatality
 *
 * Vineetg: March 25th Bug #92690
 *  -Added Debug Code to check if sw-ASID == hw-ASID
 *
 * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
 */

	.cpu A7

#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>
#include <asm/tlb-mmu1.h>

;-----------------------------------------------------------------
; ARC700 Exception Handling doesn't auto-switch stack and it only provides
; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
;
; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
; "global" is used to free up the FIRST core reg to be able to code the rest
; of exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
; need to be saved as well by extending the "global" to be 4 words. Hence
;	".size   ex_saved_reg1, 16"
; [All of this dance is to avoid stack switching for each TLB Miss, since we
; only need to save a handful of regs, as opposed to the complete reg file]
;
; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
; core reg as it will not be SMP safe.
; Thus the scratch AUX reg is used (and no longer used to cache task PGD).
; To save the remaining 3 regs - per cpu, the global is made "per-cpu".
; Epilogue thus has to locate the "per-cpu" storage for regs.
; To avoid cache line bouncing the per-cpu global is aligned/sized per
; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
;	".size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
;
; As simple as that....
;--------------------------------------------------------------------------
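
; In C terms, the save area amounts to the following (illustrative sketch
; only, not actual kernel code):
;
;	#ifdef CONFIG_SMP
;	u8 ex_saved_reg1[CONFIG_NR_CPUS << L1_CACHE_SHIFT]
;			__aligned(1 << L1_CACHE_SHIFT);	/* one line per cpu */
;	#else
;	u32 ex_saved_reg1[4];				/* r0 - r3 */
;	#endif
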
; scratch memory to save [r0-r3] used to code TLB refill Handler
ARCFP_DATA ex_saved_reg1
	.align 1 << L1_CACHE_SHIFT
	.type   ex_saved_reg1, @object
#ifdef CONFIG_SMP
	.size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
ex_saved_reg1:
	.zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
#else
	.size   ex_saved_reg1, 16
ex_saved_reg1:
	.zero 16
#endif

.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_SMP
	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
	GET_CPU_ID  r0			; get to per cpu scratch mem,
	lsl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
	add r0, @ex_saved_reg1, r0
#else
	st    r0, [@ex_saved_reg1]
	mov_s r0, @ex_saved_reg1
#endif
	st_s  r1, [r0, 4]
	st_s  r2, [r0, 8]
	st_s  r3, [r0, 12]

	; VERIFY if the ASID in MMU-PID Reg is same as
	; one in Linux data structures

	tlb_paranoid_check_asm
.endm

.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
	GET_CPU_ID  r0			; get to per cpu scratch mem
	lsl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
	add r0, @ex_saved_reg1, r0
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	lr    r0, [ARC_REG_SCRATCH_DATA0]
#else
	mov_s r0, @ex_saved_reg1
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	ld_s  r0, [r0]
#endif
.endm
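
; Rough C equivalent of the SMP save path above (sketch only):
;
;	u32 *slot = (u32 *)((u8 *)ex_saved_reg1 + (cpu_id << L1_CACHE_SHIFT));
;	slot[1] = r1; slot[2] = r2; slot[3] = r3; /* r0 sits in SCRATCH_DATA0 */
;
; One cache-line-sized slot per CPU guarantees no two CPUs ever write the
; same line, which is the whole point of the L1_CACHE_SHIFT sizing above.
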
;============================================================================
;  Troubleshooting Stuff
;============================================================================

; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
; we use the MMU PID Reg to get current ASID.
; In bizarre scenarios SW and HW ASID can get out-of-sync which is trouble.
; So we try to detect this in the TLB Miss handler

.macro tlb_paranoid_check_asm

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

	GET_CURR_TASK_ON_CPU  r3
	ld r0, [r3, TASK_ACT_MM]
	ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
	breq r0, 0, 55f	; Error if no ASID allocated

	lr r1, [ARC_REG_PID]
	and r1, r1, 0xFF

	and r2, r0, 0xFF	; MMU PID bits only for comparison
	breq r1, r2, 5f

55:
	; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
	lr  r2, [erstatus]
	bbit0 r2, STATUS_U_BIT, 5f

	; We sure are in troubled waters, Flag the error, but to do so
	; need to switch to kernel mode stack to call error routine
	GET_TSK_STACK_BASE   r3, sp

	; Call printk to shout out loud
	mov r2, 1
	j print_asid_mismatch

5:	; ASIDs match so proceed normally
	nop

#endif

.endm

;============================================================================
;TLB Miss handling Code
;============================================================================
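
; Both miss handlers below share one shape; in rough C (illustrative sketch
; only, the lowercase names are not real kernel symbols):
;
;	tlbmiss_freeup_regs();
;	pte = load_fault_pte(efa);	/* 2-level page-table walk	*/
;	if ((pte & needed_flags) != needed_flags)
;		goto do_slow_path_pf;	/* regular Linux page fault	*/
;	pte |= _PAGE_ACCESSED;		/* + _PAGE_MODIFIED on writes	*/
;	*ptep = pte;
;	conv_pte_to_tlb();
;	commit_entry_to_mmu();
;	tlbmiss_restore_regs();
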
;-----------------------------------------------------------------------------
; This macro does the page-table lookup for the faulting address.
; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
.macro LOAD_FAULT_PTE

	lr  r2, [efa]

#ifndef CONFIG_SMP
	lr  r1, [ARC_REG_SCRATCH_DATA0]	; current pgd
#else
	GET_CURR_TASK_ON_CPU  r1
	ld  r1, [r1, TASK_ACT_MM]
	ld  r1, [r1, MM_PGD]
#endif

	lsr     r0, r2, PGDIR_SHIFT	; Bits for indexing into PGD
	ld.as   r1, [r1, r0]		; PGD entry corresp to faulting addr
	and.f   r1, r1, PAGE_MASK	; Ignoring protection and other flags
					; contains Ptr to Page Table
	bz.d    do_slow_path_pf		; if no Page Table, do page fault

	; Get the PTE entry: The idea is
	; (1) x = addr >> PAGE_SHIFT	-> masks page-off bits from @fault-addr
	; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
	; (3) z = pgtbl[y]
	; To avoid the multiply at the end, we do the -2, <<2 below

	lsr     r0, r2, (PAGE_SHIFT - 2)
	and     r0, r0, ( (PTRS_PER_PTE - 1) << 2)
	ld.aw   r0, [r1, r0]		; get PTE and PTE ptr for fault addr
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
	and.f 0, r0, _PAGE_PRESENT
	bz   1f
	ld   r3, [num_pte_not_present]
	add  r3, r3, 1
	st   r3, [num_pte_not_present]
1:
#endif

.endm

;-----------------------------------------------------------------
; Convert Linux PTE entry into TLB entry
; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
; IN: r0 = PTE, r1 = ptr to PTE

.macro CONV_PTE_TO_TLB
	and    r3, r0, PTE_BITS_RWX	;          r w x
	lsl    r2, r3, 3		; r w x 0 0 0 (GLOBAL, kernel only)
	and.f  0,  r0, _PAGE_GLOBAL
	or.z   r2, r2, r3		; r w x r w x (!GLOBAL, user page)

	and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
	or  r3, r3, r2

	sr  r3, [ARC_REG_TLBPD1]	; these go in PD1

	and r2, r0, PTE_BITS_IN_PD0	; Extract other PTE flags: (V)alid, (G)lb

	lr  r3,[ARC_REG_TLBPD0]		; MMU prepares PD0 with vaddr and asid

	or  r3, r3, r2			; S | vaddr | {sasid|asid}
	sr  r3,[ARC_REG_TLBPD0]		; rewrite PD0
.endm
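
; The PD1 construction above, in rough C (sketch only):
;
;	u32 rwx   = pte & PTE_BITS_RWX;
;	u32 perms = rwx << 3;		/* kernel-mode permission slots	  */
;	if (!(pte & _PAGE_GLOBAL))
;		perms |= rwx;		/* user page: user slots get same */
;	pd1 = (pte & PTE_BITS_NON_RWX_IN_PD1) | perms;
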
;-----------------------------------------------------------------
; Commit the TLB entry into MMU

.macro COMMIT_ENTRY_TO_MMU

	/* Get free TLB slot: Set = computed from vaddr, way = random */
	sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]

	/* Commit the Write */
#if (CONFIG_ARC_MMU_VER >= 2)	/* introduced in v2 */
	sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
#else
	sr TLBWrite, [ARC_REG_TLBCOMMAND]
#endif
.endm


ARCFP_CODE	;Fast Path Code, candidate for ICCM

;-----------------------------------------------------------------------------
; I-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissI)

	TLBMISS_FREEUP_REGS

#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
	ld  r0, [@numitlb]
	add r0, r0, 1
	st  r0, [@numitlb]
#endif

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed, r2 is set up with EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Check if PTE permissions approp for executing code
	cmp_s   r2, VMALLOC_START
	mov_s   r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
	or.hs   r2, r2, _PAGE_GLOBAL

	and     r3, r0, r2	; Mask out NON Flag bits from PTE
	xor.f   r3, r3, r2	; check ( ( pte & flags_test ) == flags_test )
	bnz     do_slow_path_pf

	; Let Linux VM know that the page was accessed
	or      r0, r0, _PAGE_ACCESSED	; set Accessed Bit
	st_s    r0, [r1]		; Write back PTE

	CONV_PTE_TO_TLB
	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
	rtie

END(EV_TLBMissI)
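
; The VERIFY_PTE test above, in rough C (sketch only):
;
;	u32 need = _PAGE_PRESENT | _PAGE_EXECUTE;
;	if (efa >= VMALLOC_START)	/* the cmp_s/or.hs pair	       */
;		need |= _PAGE_GLOBAL;	/* kernel mappings are global  */
;	if ((pte & need) != need)	/* the and + xor.f + bnz trio  */
;		goto do_slow_path_pf;
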
;-----------------------------------------------------------------------------
; D-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissD)

	TLBMISS_FREEUP_REGS

#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
	ld  r0, [@numdtlb]
	add r0, r0, 1
	st  r0, [@numdtlb]
#endif

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed
	; If PTE exists, it will set up r0 = PTE, r1 = Ptr to PTE, r2 = EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)

	cmp_s	r2, VMALLOC_START
	mov_s	r2, _PAGE_PRESENT	; common bit for K/U PTE
	or.hs	r2, r2, _PAGE_GLOBAL	; kernel PTE only

	; Linux PTE [RWX] bits are semantically overloaded:
	; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc)
	; -Otherwise they are user-mode permissions, and those are exactly
	;  the same for kernel mode as well (e.g. copy_(to|from)_user)

	lr      r3, [ecr]
	btst_s  r3, ECR_C_BIT_DTLB_LD_MISS	; Read Access
	or.nz   r2, r2, _PAGE_READ		; chk for Read flag in PTE
	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; Write Access
	or.nz   r2, r2, _PAGE_WRITE		; chk for Write flag in PTE
	; Above laddering takes care of XCHG access (both R and W)

	; By now, r2 is set up with all the Flags we need to check in PTE
	and     r3, r0, r2		; Mask out NON Flag bits from PTE
	brne.d  r3, r2, do_slow_path_pf	; is ((pte & flags_test) == flags_test)

	;----------------------------------------------------------------
	; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
	lr      r3, [ecr]
	or      r0, r0, _PAGE_ACCESSED		; Accessed bit always
	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; See if it was a Write Access ?
	or.nz   r0, r0, _PAGE_MODIFIED		; if Write, set Dirty bit as well
	st_s    r0, [r1]			; Write back PTE

	CONV_PTE_TO_TLB

#if (CONFIG_ARC_MMU_VER == 1)
	; MMU with 2 way set assoc J-TLB, needs some help in pathetic case of
	; memcpy where 3 parties contend for 2 ways, resulting in a livelock.
	; But only for old MMU or one with Metal Fix
	TLB_WRITE_HEURISTICS
#endif

	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
	rtie
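
; The ECR-driven permission laddering above, in rough C (sketch only;
; cause_is_load/store are illustrative, not real kernel helpers):
;
;	u32 need = _PAGE_PRESENT;
;	if (efa >= VMALLOC_START)	need |= _PAGE_GLOBAL;
;	if (cause_is_load(ecr))		need |= _PAGE_READ;
;	if (cause_is_store(ecr))	need |= _PAGE_WRITE;
;	/* per the comment above, an XCHG miss flags both R and W */
;	if ((pte & need) != need)	goto do_slow_path_pf;
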
;-------- Common routine to call Linux Page Fault Handler -----------
do_slow_path_pf:

	; Restore the 4 scratch regs saved by fast path miss handler
	TLBMISS_RESTORE_REGS

	; Slow path TLB Miss handled as a regular ARC Exception
	; (stack switching / save the complete reg-file).
	EXCEPTION_PROLOGUE

	; ------- setup args for Linux Page fault Handler ---------
	mov_s r1, sp
	lr    r0, [efa]

	; We don't want exceptions to be disabled while the fault is handled.
	; Now that we have saved the context we return from exception hence
	; exceptions get re-enabled

	FAKE_RET_FROM_EXCPN  r9

	bl  do_page_fault
	b   ret_from_exception

END(EV_TLBMissD)