Lines Matching +full:i +full:- +full:tlb +full:- +full:sets

1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
4 * Copyright (C) 2008-2009 PetaLogix
17 #include <asm-generic/pgtable-nopmd.h>
60 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
64 * We use the hash table as an extended TLB, i.e. a cache of currently
65 * active mappings. We maintain a two-level page table tree, much
67 * management code. Low-level assembler code in hashtable.S
74 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
75 * instruction and data sides share a unified, 64-entry, semi-associative
76 * TLB which is maintained totally under software control. In addition, the
77 * instruction side has a hardware-managed, 2, 4, or 8-entry, fully-associative
78 * TLB which serves as a first level to the shared TLB. These two TLBs are
83 * The normal case is that PTEs are 32-bits and we have a 1-page
84 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
88 /* PGDIR_SHIFT determines what a top-level page table entry can map */
91 #define PGDIR_MASK (~(PGDIR_SIZE-1))
94 * entries per page directory level: our page-table tree is two-level, so
99 #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
105 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
115 * Bits in a linux-style PTE. These match the bits in the
116 * (hardware-defined) PTE as closely as possible.
123 * RPN..................... 0 0 EX WR ZSEL....... W I M G
127 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
130 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
131 * of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
134 * - PRESENT *must* be in the bottom two bits because swap PTEs use the top
136 * borrow it for PAGE_PRESENT. Bit 30 is cleared in the TLB miss handler
137 * before the TLB entry is loaded.
138 * - All other bits of the PTE are loaded into TLBLO without
148 #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
149 #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
183 * another purpose. -- paulus.
214 * for zero-mapped memory areas etc..
232 ((pte_val(x) - memory_start) >> PAGE_SHIFT))
291 pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
305 * pte_update clears and sets bit atomically, and returns
307 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
322 : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set) in pte_update()
388 /* Find an entry in the third-level page table.. */
398 * <------------------ offset -------------------> E < type -> 0 0