/*
 * TLB Exception Handling for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: April 2011 :
 *  -MMU v1: moved out legacy code into a separate file
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * Vineetg: July 2009
 *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
 *   entry, so that it doesn't knock out its I-TLB entry
 *  -Some more fine tuning:
 *   bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
 *
 * Vineetg: July 2009
 *  -Practically rewrote the I/D TLB Miss handlers
 *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
 *   Hence Leaner by 1.5 K
 *   Used Conditional arithmetic to replace excessive branching
 *   Also used short instructions wherever possible
 *
 * Vineetg: Aug 13th 2008
 *  -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
 *   more information in case of a Fatality
 *
 * Vineetg: March 25th Bug #92690
 *  -Added Debug Code to check if sw-ASID == hw-ASID
 *
 * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
 */

	.cpu A7

#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>
#if (CONFIG_ARC_MMU_VER == 1)
#include <asm/tlb-mmu1.h>
#endif

;--------------------------------------------------------------------------
; scratch memory to save the registers (r0-r3) used to code TLB refill Handler
; For details refer to comments before TLBMISS_FREEUP_REGS below
;--------------------------------------------------------------------------

ARCFP_DATA ex_saved_reg1
	.align 1 << L1_CACHE_SHIFT	; IMP: Must be Cache Line aligned
	.type   ex_saved_reg1, @object
#ifdef CONFIG_SMP
	.size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
ex_saved_reg1:
	.zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
#else
	.size   ex_saved_reg1, 16
ex_saved_reg1:
	.zero 16
#endif

;============================================================================
;  Troubleshooting Stuff
;============================================================================

; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
; we use the MMU PID Reg to get current ASID.
; In bizarre scenarios SW and HW ASID can get out-of-sync which is trouble.
; So we try to detect this in TLB Miss handler

.macro DBG_ASID_MISMATCH

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

	; make sure h/w ASID is same as s/w ASID

	GET_CURR_TASK_ON_CPU  r3
	ld r0, [r3, TASK_ACT_MM]
	ld r0, [r0, MM_CTXT+MM_CTXT_ASID]

	lr r1, [ARC_REG_PID]
	and r1, r1, 0xFF
	breq r1, r0, 5f

	; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
	lr  r0, [erstatus]
	bbit0 r0, STATUS_U_BIT, 5f

	; We sure are in troubled waters, Flag the error, but to do so
	; need to switch to kernel mode stack to call error routine
	GET_TSK_STACK_BASE   r3, sp

	; Call printk to shout out aloud
	mov r0, 1
	j print_asid_mismatch

5:	; ASIDs match so proceed normally
	nop

#endif

.endm

;============================================================================
;TLB Miss handling Code
;============================================================================

;-----------------------------------------------------------------------------
; This macro does the page-table lookup for the faulting address.
; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
.macro LOAD_FAULT_PTE

	lr  r2, [efa]

#ifndef CONFIG_SMP
	lr  r1, [ARC_REG_SCRATCH_DATA0]	; current pgd
#else
	GET_CURR_TASK_ON_CPU  r1
	ld  r1, [r1, TASK_ACT_MM]
	ld  r1, [r1, MM_PGD]
#endif

	lsr     r0, r2, PGDIR_SHIFT	; Bits for indexing into PGD
	ld.as   r1, [r1, r0]		; PGD entry corresp to faulting addr
	and.f   r1, r1, PAGE_MASK	; Ignoring protection and other flags
					;   contains Ptr to Page Table
	bz.d    do_slow_path_pf		; if no Page Table, do page fault

	; Get the PTE entry: The idea is
	; (1) x = addr >> PAGE_SHIFT	-> masks page-off bits from @fault-addr
	; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
	; (3) z = pgtbl[y]
	; To avoid the multiply by in end, we do the -2, <<2 below

	lsr     r0, r2, (PAGE_SHIFT - 2)
	and     r0, r0, ( (PTRS_PER_PTE - 1) << 2)
	ld.aw   r0, [r1, r0]		; get PTE and PTE ptr for fault addr
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
	; NOTE(review): bz skips the count when _PAGE_PRESENT is clear, so
	; this increments when the PTE *is* present — the opposite of what
	; the counter name "num_pte_not_present" suggests; verify intent.
	and.f 0, r0, _PAGE_PRESENT
	bz   1f
	ld   r3, [num_pte_not_present]
	add  r3, r3, 1
	st   r3, [num_pte_not_present]
1:
#endif

.endm

;-----------------------------------------------------------------
; Convert Linux PTE entry into TLB entry
; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
; IN: r0 = PTE, r1 = ptr to PTE

.macro CONV_PTE_TO_TLB
	and r3, r0, PTE_BITS_IN_PD1	; Extract permission flags+PFN from PTE
	sr  r3, [ARC_REG_TLBPD1]	; these go in PD1

	and r2, r0, PTE_BITS_IN_PD0	; Extract other PTE flags: (V)alid, (G)lb
#if (CONFIG_ARC_MMU_VER <= 2)	/* Need not be done with v3 onwards */
	lsr r2, r2			; shift PTE flags to match layout in PD0
#endif

	lr  r3, [ARC_REG_TLBPD0]	; MMU prepares PD0 with vaddr and asid

	or  r3, r3, r2			; S | vaddr | {sasid|asid}
	sr  r3, [ARC_REG_TLBPD0]	; rewrite PD0
.endm

;-----------------------------------------------------------------
; Commit the TLB entry into MMU

.macro COMMIT_ENTRY_TO_MMU

	/* Get free TLB slot: Set = computed from vaddr, way = random */
	sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]

	/* Commit the Write */
#if (CONFIG_ARC_MMU_VER >= 2)	/* introduced in v2 */
	sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
#else
	sr TLBWrite, [ARC_REG_TLBCOMMAND]
#endif
.endm

;-----------------------------------------------------------------
; ARC700 Exception Handling doesn't auto-switch stack and it only provides
; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
;
; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
; "global" is used to free-up FIRST core reg to be able to code the rest of
; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
; need to be saved as well by extending the "global" to be 4 words. Hence
;	".size   ex_saved_reg1, 16"
; [All of this dance is to avoid stack switching for each TLB Miss, since we
; only need to save only a handful of regs, as opposed to complete reg file]
;
; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST
; core reg as it will not be SMP safe.
; Thus scratch AUX reg is used (and no longer used to cache task PGD).
; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
; Epilogue thus has to locate the "per-cpu" storage for regs.
; To avoid cache line bouncing the per-cpu global is aligned/sized per
; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
;	".size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"

; As simple as that....
; Save r0-r3 so the fast-path miss handler has scratch registers to work
; with, without switching stacks. See the design comment above for why a
; global (UP) vs. per-cpu area + scratch AUX reg (SMP) is used.
.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_SMP
	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
	GET_CPU_ID  r0			; get to per cpu scratch mem,
	lsl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
	add r0, @ex_saved_reg1, r0
#else
	st    r0, [@ex_saved_reg1]
	mov_s r0, @ex_saved_reg1
#endif
	st_s  r1, [r0, 4]
	st_s  r2, [r0, 8]
	st_s  r3, [r0, 12]

	; VERIFY if the ASID in MMU-PID Reg is same as
	; one in Linux data structures

	DBG_ASID_MISMATCH
.endm

;-----------------------------------------------------------------
; Mirror of TLBMISS_FREEUP_REGS: reload r0-r3 before rtie / slow path.
.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
	GET_CPU_ID  r0			; get to per cpu scratch mem
	lsl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
	add r0, @ex_saved_reg1, r0
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	lr  r0, [ARC_REG_SCRATCH_DATA0]
#else
	mov_s r0, @ex_saved_reg1
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	ld_s  r0, [r0]
#endif
.endm

ARCFP_CODE	;Fast Path Code, candidate for ICCM
;-----------------------------------------------------------------------------
; I-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ARC_ENTRY EV_TLBMissI

	TLBMISS_FREEUP_REGS

#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
	ld  r0, [@numitlb]
	add r0, r0, 1
	st  r0, [@numitlb]
#endif

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Check if PTE permissions approp for executing code
	cmp_s   r2, VMALLOC_START
	mov.lo  r2, (_PAGE_PRESENT | _PAGE_U_EXECUTE)
	mov.hs  r2, (_PAGE_PRESENT | _PAGE_K_EXECUTE)

	and     r3, r0, r2	; Mask out NON Flag bits from PTE
	xor.f   r3, r3, r2	; check ( ( pte & flags_test ) == flags_test )
	bnz     do_slow_path_pf

	; Let Linux VM know that the page was accessed
	or      r0, r0, _PAGE_ACCESSED	; set Accessed Bit
	st_s    r0, [r1]		; Write back PTE

	CONV_PTE_TO_TLB
	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
	rtie

ARC_EXIT EV_TLBMissI
;-----------------------------------------------------------------------------
; D-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ARC_ENTRY EV_TLBMissD

	TLBMISS_FREEUP_REGS

#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
	ld  r0, [@numdtlb]
	add r0, r0, 1
	st  r0, [@numdtlb]
#endif

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed
	; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)

	mov_s   r2, 0
	lr      r3, [ecr]
	btst_s  r3, ECR_C_BIT_DTLB_LD_MISS	; Read Access
	or.nz   r2, r2, _PAGE_U_READ		; chk for Read flag in PTE
	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; Write Access
	or.nz   r2, r2, _PAGE_U_WRITE		; chk for Write flag in PTE
	; Above laddering takes care of XCHG access
	;   which is both Read and Write

	; If kernel mode access, make _PAGE_xx flags as _PAGE_K_xx
	; For copy_(to|from)_user, despite exception taken in kernel mode,
	; this code is not hit, because EFA would still be the user mode
	; address (EFA < 0x6000_0000).
	; This code is for legit kernel mode faults, vmalloc specifically
	; (EFA: 0x7000_0000 to 0x7FFF_FFFF)

	lr      r3, [efa]
	cmp     r3, VMALLOC_START - 1	; If kernel mode access
	asl.hi  r2, r2, 3		; make _PAGE_xx flags as _PAGE_K_xx
	or      r2, r2, _PAGE_PRESENT	; Common flag for K/U mode

	; By now, r2 setup with all the Flags we need to check in PTE
	and     r3, r0, r2		; Mask out NON Flag bits from PTE
	brne.d  r3, r2, do_slow_path_pf	; is ((pte & flags_test) == flags_test)

	;----------------------------------------------------------------
	; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
	lr      r3, [ecr]
	or      r0, r0, _PAGE_ACCESSED		; Accessed bit always
	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; See if it was a Write Access ?
	or.nz   r0, r0, _PAGE_MODIFIED		; if Write, set Dirty bit as well
	st_s    r0, [r1]			; Write back PTE

	CONV_PTE_TO_TLB

#if (CONFIG_ARC_MMU_VER == 1)
	; MMU with 2 way set assoc J-TLB, needs some help in pathetic case of
	; memcpy where 3 parties contend for 2 ways, ensuing a livelock.
	; But only for old MMU or one with Metal Fix
	TLB_WRITE_HEURISTICS
#endif

	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
	rtie

;-------- Common routine to call Linux Page Fault Handler -----------
do_slow_path_pf:

	; Restore the 4-scratch regs saved by fast path miss handler
	TLBMISS_RESTORE_REGS

	; Slow path TLB Miss handled as a regular ARC Exception
	; (stack switching / save the complete reg-file).
	; That requires freeing up r9
	EXCPN_PROLOG_FREEUP_REG r9

	lr  r9, [erstatus]

	SWITCH_TO_KERNEL_STK
	SAVE_ALL_SYS

	; ------- setup args for Linux Page fault Handler ---------
	mov_s r0, sp
	lr  r1, [efa]

	; We don't want exceptions to be disabled while the fault is handled.
	; Now that we have saved the context we return from exception hence
	; exceptions get re-enabled

	FAKE_RET_FROM_EXCPN  r9

	bl  do_page_fault
	b   ret_from_exception

ARC_EXIT EV_TLBMissD

ARC_ENTRY EV_TLBMissB	; Bogus entry to measure sz of DTLBMiss hdlr