/linux/arch/mips/include/asm/
pgtable.h:
    45: extern void __update_cache(unsigned long address, pte_t pte);
    111: # define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL))
    113: # define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
    116: #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
    117: #define pte_no_exec(pte) ((pte).pte_low & _PAGE_NO_EXEC)
    119: static inline void set_pte(pte_t *ptep, pte_t pte)
    121: ptep->pte_high = pte.pte_high;
    123: ptep->pte_low = pte.pte_low;
    126: if (pte.pte_high & _PAGE_GLOBAL) {
    128: if (pte.pte_low & _PAGE_GLOBAL) {
    [all …]
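
The two MIPS pte_none() variants above deliberately mask out _PAGE_GLOBAL before testing for emptiness: the global bit has to stay consistent across the paired MIPS TLB entries, so an otherwise-cleared PTE may legitimately still carry it. A minimal userspace sketch of that idea, with a simplified two-word pte_t and hypothetical bit values (not the real MIPS definitions):

    #include <assert.h>
    #include <stdint.h>

    #define _PAGE_GLOBAL  0x1u   /* hypothetical */
    #define _PAGE_PRESENT 0x2u   /* hypothetical */

    typedef struct { uint32_t pte_low, pte_high; } pte_t;

    static int pte_none(pte_t pte)
    {
            /* "empty" means: nothing set apart from the global bit */
            return !((pte.pte_low | pte.pte_high) & ~_PAGE_GLOBAL);
    }

    int main(void)
    {
            pte_t cleared = { .pte_low = _PAGE_GLOBAL, .pte_high = 0 };
            pte_t mapped  = { .pte_low = _PAGE_PRESENT | _PAGE_GLOBAL, .pte_high = 0 };

            assert(pte_none(cleared));    /* global-only entry still counts as none */
            assert(!pte_none(mapped));
            return 0;
    }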
|
/linux/arch/m68k/include/asm/
mcf_pgtable.h:
    10: * after masking from the pte.
    99: static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
    101: pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
    102: return pte;
    112: #define __pte_page(pte) ((void *) (pte_val(pte) & PAGE_MASK))
    115: static inline int pte_none(pte_t pte)
    117: return !pte_val(pte);
    120: static inline int pte_present(pte_t pte)
    122: return pte_val(pte) & CF_PAGE_VALID;
    131: #define pte_page(pte) virt_to_page(__pte_page(pte))
    [all …]
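
pte_modify() here shows the pattern that recurs throughout this listing: keep the bits covered by the architecture's "change mask" (at minimum the page frame number) and OR in the new protection bits. A hedged userspace model of that pattern, with invented masks rather than the real CF_PAGE_CHG_MASK (a real change mask usually also preserves sticky state bits):

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t pte_t;
    typedef uint32_t pgprot_t;

    #define PAGE_MASK 0xfffff000u   /* hypothetical frame-number bits */
    #define CHG_MASK  PAGE_MASK     /* bits preserved across a protection change */

    static pte_t pte_modify(pte_t pte, pgprot_t newprot)
    {
            /* keep the frame, splice in the new protection bits */
            return (pte & CHG_MASK) | newprot;
    }

    int main(void)
    {
            pte_t old = 0x12345678u;
            pte_t new = pte_modify(old, 0x005u);

            assert((new & PAGE_MASK) == (old & PAGE_MASK));  /* frame preserved */
            assert((new & ~PAGE_MASK) == 0x005u);            /* protection replaced */
            return 0;
    }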
|
sun3_pgtable.h:
    29: /* Page protection values within PTE. */
    79: static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
    81: pte_val(pte) = (pte_val(pte) & SUN3_PAGE_CHG_MASK) | pgprot_val(newprot);
    82: return pte;
    87: #define __pte_page(pte) \
    88: (__va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
    95: static inline int pte_none (pte_t pte) { return !pte_val (pte); }
    96: static inline int pte_present (pte_t pte) { return pte_val (pte) & SUN3_PAGE_VALID; }
    103: #define pte_pfn(pte) (pte_val(pte) & SUN3_PAGE_PGNUM_MASK)
    107: #define pte_page(pte) virt_to_page(__pte_page(pte))
    [all …]
|
motorola_pgtable.h:
    84: static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
    86: pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
    87: return pte;
    100: #define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
    105: #define pte_none(pte) (!pte_val(pte))
    106: #define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
    110: #define pte_page(pte) virt_to_page(__va(pte_val(pte)))
    111: #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
    135: printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
    146: static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); }
    [all …]
|
/linux/arch/hexagon/include/asm/
pgtable.h:
    21: * The PTE model described here is that of the Hexagon Virtual Machine,
    30: * To maximize the comfort level for the PTE manipulation macros,
    39: * We have a total of 4 "soft" bits available in the abstract PTE.
    43: * the PTE describes MMU programming or swap space.
    99: /* Any bigger and the PTE disappears. */
    136: #define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
    143: extern void sync_icache_dcache(pte_t pte);
    145: #define pte_present_exec_user(pte) \
    146: ((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
    160: * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
    [all …]
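
The pte_present_exec_user() macro is cut off by the excerpt, but the visible part (AND with a multi-bit mask, then compare rather than truth-test) is the usual "all of these bits set" idiom, as opposed to "any of them set". A sketch of that generic idiom, not necessarily the exact Hexagon definition, with hypothetical bit values:

    #include <assert.h>
    #include <stdint.h>

    #define _PAGE_EXECUTE 0x04u  /* hypothetical */
    #define _PAGE_USER    0x08u  /* hypothetical */

    static int all_bits_set(uint32_t pteval, uint32_t mask)
    {
            return (pteval & mask) == mask;   /* every bit in mask present */
    }

    static int any_bit_set(uint32_t pteval, uint32_t mask)
    {
            return (pteval & mask) != 0;      /* at least one bit present */
    }

    int main(void)
    {
            uint32_t pte = _PAGE_USER;        /* user-accessible but not executable */

            assert(!all_bits_set(pte, _PAGE_EXECUTE | _PAGE_USER));
            assert(any_bit_set(pte, _PAGE_EXECUTE | _PAGE_USER));
            return 0;
    }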
|
/linux/arch/powerpc/include/asm/nohash/
pgtable.h:
    35: static inline unsigned long pte_huge_size(pte_t pte)
    42: * PTE updates. This function is called whenever an existing
    43: * valid PTE is updated. This does -not- include set_pte_at()
    44: * which nowadays only sets a new PTE.
    47: * and the PTE may be either 32 or 64 bit wide. In the later case,
    48: * when using atomic updates, only the low part of the PTE is
    134: /* Set the dirty and/or accessed bits atomically in a linux PTE */
    151: /* Generic accessors to PTE bits */
    153: static inline pte_t pte_mkwrite_novma(pte_t pte)
    158: return __pte(pte_val(pte) | _PAGE_RW);
    [all …]
|
/linux/arch/arm/include/asm/
pgtable.h:
    60: #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte)
    167: #define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
    170: #define pte_page(pte) pfn_to_page(pte_pfn(pte))
    174: #define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
    175: : !!(pte_val(pte) & (val)))
    176: #define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
    178: #define pte_none(pte) (!pte_val(pte))
    179: #define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
    180: #define pte_valid(pte) (pte_isset((pte), L_PTE_VALID))
    181: #define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
    [all …]
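
The pte_isset() macro above guards against a 32/64-bit pitfall: its result is typically consumed as an int, so a flag living above bit 31 of a 64-bit (LPAE) PTE would be truncated to zero if returned raw, while flags that fit in 32 bits can take the cheap path. The (u32)(val) == (val) test picks between the two at compile time. A sketch of the failure mode and the !! fix, using an invented flag:

    #include <assert.h>
    #include <stdint.h>

    #define HIGH_FLAG (1ull << 40)   /* hypothetical flag above bit 31 */

    static int isset_naive(uint64_t pteval, uint64_t flag)
    {
            /* models the raw result being narrowed to 32 bits */
            return (uint32_t)(pteval & flag);
    }

    static int isset_fixed(uint64_t pteval, uint64_t flag)
    {
            return !!(pteval & flag);   /* normalized to 0 or 1 before narrowing */
    }

    int main(void)
    {
            uint64_t pte = HIGH_FLAG;

            assert(isset_naive(pte, HIGH_FLAG) == 0);  /* the bug the macro avoids */
            assert(isset_fixed(pte, HIGH_FLAG) == 1);
            return 0;
    }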
|
/linux/arch/arm64/include/asm/
pgtable.h:
    46: * These barriers are emitted under certain conditions after a pte entry   (in emit_pte_barriers())
    51: * setting the pte to valid won't cause a spurious fault. If the thread   (in emit_pte_barriers())
    152: pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
    155: static inline phys_addr_t __pte_to_phys(pte_t pte)
    157: pte_val(pte) &= ~PTE_MAYBE_SHARED;
    158: return (pte_val(pte) & PTE_ADDR_LOW) |
    159: ((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
    166: static inline phys_addr_t __pte_to_phys(pte_t pte)
    168: return pte_val(pte) & PTE_ADDR_LOW;
    177: #define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT)
    [all …]
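
The two __pte_to_phys() variants above correspond to the two arm64 physical-address sizes: in the larger configuration the upper part of the physical address does not fit in the PTE's normal output-address field, so it is kept in a separate group of PTE bits and shifted back into place when the address is reassembled; otherwise a single mask suffices. A sketch of the reassembly, with hypothetical field positions rather than the real arm64 layout:

    #include <assert.h>
    #include <stdint.h>

    #define PTE_ADDR_LOW         0x0000fffffffff000ull  /* hypothetical: bits 12..47 */
    #define PTE_ADDR_HIGH        0x0000000000000f00ull  /* hypothetical: bits 8..11 */
    #define PTE_ADDR_HIGH_SHIFT  40                     /* hypothetical: move to bits 48..51 */

    static uint64_t pte_to_phys(uint64_t pteval)
    {
            return (pteval & PTE_ADDR_LOW) |
                   ((pteval & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
    }

    int main(void)
    {
            /* an entry whose "high" field holds 0x3 -> physical bits 48..49 */
            uint64_t pte = 0x0000000123456000ull | (0x3ull << 8);

            assert(pte_to_phys(pte) == (0x0000000123456000ull | (0x3ull << 48)));
            return 0;
    }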
|
/linux/arch/csky/include/asm/
pgtable.h:
    27: pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
    36: #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
    37: #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
    42: #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
    85: static inline void set_pte(pte_t *p, pte_t pte)
    87: *p = pte;
    141: static inline int pte_read(pte_t pte)
    143: return pte.pte_low & _PAGE_READ;
    146: static inline int pte_write(pte_t pte)
    148: return (pte).pte_low & _PAGE_WRITE;
    [all …]
|
/linux/arch/um/include/asm/
pgtable.h:
    113: static inline int pte_none(pte_t pte)
    115: return pte_is_zero(pte);
    122: static inline int pte_read(pte_t pte)
    124: return((pte_get_bits(pte, _PAGE_USER)) &&
    125: !(pte_get_bits(pte, _PAGE_PROTNONE)));
    128: static inline int pte_exec(pte_t pte){
    129: return((pte_get_bits(pte, _PAGE_USER)) &&
    130: !(pte_get_bits(pte, _PAGE_PROTNONE)));
    133: static inline int pte_write(pte_t pte)
    135: return((pte_get_bits(pte, _PAGE_RW)) &&
    [all …]
|
/linux/arch/openrisc/include/asm/
pgtable.h:
    44: /* Certain architectures need to do special things when pte's
    102: * An OR32 PTE looks like this:
    117: * PTE as per above
    120: #define _PAGE_CC 0x001 /* software: pte contains a translation */
    200: static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
    201: static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
    202: static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
    203: static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
    204: static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
    206: static inline pte_t pte_wrprotect(pte_t pte)
    [all …]
|
/linux/arch/microblaze/include/asm/
pgtable.h:
    84: * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
    107: printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
    114: * Bits in a linux-style PTE. These match the bits in the
    115: * (hardware-defined) PTE as closely as possible.
    124: * Where possible we make the Linux PTE bits match up with this
    137: * - All other bits of the PTE are loaded into TLBLO without
    139: * software PTE bits. We actually use bits 21, 24, 25, and
    146: #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
    180: * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
    181: * to have it in the Linux PTE, and in fact the bit could be reused for
    [all …]
|
/linux/arch/nios2/include/asm/
pgtable.h:
    87: static inline int pte_write(pte_t pte) \
    88: { return pte_val(pte) & _PAGE_WRITE; }
    89: static inline int pte_dirty(pte_t pte) \
    90: { return pte_val(pte) & _PAGE_DIRTY; }
    91: static inline int pte_young(pte_t pte) \
    92: { return pte_val(pte) & _PAGE_ACCESSED; }
    105: static inline int pte_none(pte_t pte)
    107: return !(pte_val(pte) & ~(_PAGE_GLOBAL|0xf));
    110: static inline int pte_present(pte_t pte) \
    111: { return pte_val(pte) & _PAGE_PRESENT; }
    [all …]
|
/linux/drivers/iommu/intel/
pasid.c (matches in intel_pasid_tear_down_entry()):
    243: struct pasid_entry *pte;
    247: pte = intel_pasid_get_entry(dev, pasid);
    248: if (WARN_ON(!pte)) {
    253: if (!pasid_pte_is_present(pte)) {
    254: if (!pasid_pte_is_fault_disabled(pte)) {
    255: WARN_ON(READ_ONCE(pte->val[0]) != 0);
    266: pasid_clear_entry(pte);
    273: did = pasid_get_domain_id(pte);
    274: pgtt = pasid_pte_get_pgtt(pte);
    279: clflush_cache_range(pte, sizeof(*pte));
    [all …]
|
/linux/arch/powerpc/include/asm/book3s/32/
pgtable.h:
    21: #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
    22: #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
    35: /* We never clear the high word of the pte */
    53: * Location of the PFN in the PTE. Most 32-bit platforms use the same
    111: /* Bits to mask out from a PMD to get to the PTE page */
    120: * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
    125: * level has 2048 entries and the second level has 512 64-bit PTE entries.
    212: * Bits in a linux-style PTE. These match the bits in the
    213: * (hardware-defined) PowerPC PTE as closely as possible.
    250: * PTE updates. This function is called whenever an existing
    [all …]
|
/linux/arch/powerpc/mm/
pgtable.c:
    29: #include <asm/pte-walk.h>
    45: * reasonably "normal" PTEs. We currently require a PTE to be present
    46: * and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
    49: static inline int pte_looks_normal(pte_t pte, unsigned long addr)
    52: if (pte_present(pte) && !pte_special(pte)) {
    53: if (pte_ci(pte))
    61: static struct folio *maybe_pte_to_folio(pte_t pte)
    63: unsigned long pfn = pte_pfn(pte);
    82: static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
    84: pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
    [all …]
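
pte_looks_normal() above illustrates a common filtering step: before doing per-page work such as cache maintenance, cheaply classify the PTE and bail out for entries that are absent, special, or cache-inhibited. A sketch of that gating idea with stand-in predicates and invented flags, not the powerpc helpers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_PRESENT 0x1u  /* hypothetical flag */
    #define PAGE_SPECIAL 0x2u  /* hypothetical flag */
    #define PAGE_NOCACHE 0x4u  /* hypothetical flag */

    static bool pte_looks_normal(uint64_t pteval)
    {
            if (!(pteval & PAGE_PRESENT))
                    return false;            /* nothing mapped: nothing to sync */
            if (pteval & PAGE_SPECIAL)
                    return false;            /* no struct page behind it */
            if (pteval & PAGE_NOCACHE)
                    return false;            /* cache-inhibited mapping */
            return true;
    }

    int main(void)
    {
            uint64_t ptes[] = { 0, PAGE_PRESENT, PAGE_PRESENT | PAGE_NOCACHE };

            for (unsigned int i = 0; i < 3; i++)
                    printf("pte %u: %s\n", i,
                           pte_looks_normal(ptes[i]) ? "sync caches" : "skip");
            return 0;
    }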
|
/linux/arch/sparc/include/asm/
pgtable_64.h:
    111: /* PTE bits which are the same in SUN4U and SUN4V format. */
    118: /* SUN4U pte bits... */
    149: /* SUN4V pte bits... */
    232: pte_t pte = pfn_pte(page_nr, pgprot);   (in pfn_pmd())
    234: return __pmd(pte_val(pte));   (in pfn_pmd())
    239: static inline unsigned long pte_pfn(pte_t pte)
    252: : "r" (pte_val(pte)),
    260: static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
    315: return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
    321: pte_t pte = __pte(pmd_val(pmd));   (in pmd_modify())
    [all …]
|
/linux/include/asm-generic/
hugetlb.h:
    8: static inline unsigned long huge_pte_write(pte_t pte)
    10: return pte_write(pte);
    13: static inline unsigned long huge_pte_dirty(pte_t pte)
    15: return pte_dirty(pte);
    18: static inline pte_t huge_pte_mkwrite(pte_t pte)
    20: return pte_mkwrite_novma(pte);
    24: static inline pte_t huge_pte_wrprotect(pte_t pte)
    26: return pte_wrprotect(pte);
    30: static inline pte_t huge_pte_mkdirty(pte_t pte)
    32: return pte_mkdirty(pte);
    [all …]
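
These asm-generic helpers show a delegation pattern: the huge-page variants are thin wrappers around the ordinary pte accessors, so an architecture that encodes huge PTEs the same way as normal ones only needs to supply the base helpers. A self-contained sketch of that idea, with an invented pte_t and bit value:

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint64_t val; } pte_t;

    #define _PAGE_DIRTY 0x40u  /* hypothetical */

    /* base accessors an "arch" would provide */
    static int   pte_dirty(pte_t pte)   { return !!(pte.val & _PAGE_DIRTY); }
    static pte_t pte_mkdirty(pte_t pte) { pte.val |= _PAGE_DIRTY; return pte; }

    /* generic huge-page wrappers, analogous to asm-generic/hugetlb.h */
    static int   huge_pte_dirty(pte_t pte)   { return pte_dirty(pte); }
    static pte_t huge_pte_mkdirty(pte_t pte) { return pte_mkdirty(pte); }

    int main(void)
    {
            pte_t pte = { 0 };

            assert(!huge_pte_dirty(pte));
            assert(huge_pte_dirty(huge_pte_mkdirty(pte)));
            return 0;
    }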
|
/linux/arch/parisc/include/asm/
pgtable.h:
    64: extern void __update_cache(pte_t pte);
    79: printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
    104: * done to get usable bits out of the PTE) */
    182: /* this defines the shift to the usable bits in the PTE it is set so
    187: /* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
    315: static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
    316: static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
    317: static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
    318: static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
    320: static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
    [all …]
|
/linux/arch/riscv/mm/
hugetlbpage.c (matches in huge_ptep_get() and huge_pte_alloc()):
    18: pte_t pte = ptep_get(ptep);
    20: if (pte_dirty(pte))
    23: if (pte_young(pte))
    36: pte_t *pte = NULL;
    52: pte = (pte_t *)pud;
    58: pte = huge_pmd_share(mm, vma, addr, pud);
    60: pte = (pte_t *)pmd_alloc(mm, pud, addr);
    70: pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order));
    76: if (pte) {
    77: pte_t pteval = ptep_get_lockless(pte);
    [all …]
|
/linux/arch/x86/include/asm/
pgtable.h:
    68: #define set_pte(ptep, pte) native_set_pte(ptep, pte)
    70: #define set_pte_atomic(ptep, pte) \
    71: native_set_pte_atomic(ptep, pte)
    155: static inline bool pte_dirty(pte_t pte)
    157: return pte_flags(pte) & _PAGE_DIRTY_BITS;
    160: static inline bool pte_shstk(pte_t pte)
    163: (pte_flags(pte) & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
    166: static inline int pte_young(pte_t pte)
    168: return pte_flags(pte) & _PAGE_ACCESSED;
    171: static inline bool pte_decrypted(pte_t pte)
    [all …]
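
The flag test visible on line 163 is how x86 identifies a shadow-stack PTE: Dirty set together with Write clear, a combination that ordinary writable mappings do not use. A sketch of just that test, with hypothetical bit values:

    #include <assert.h>
    #include <stdint.h>

    #define _PAGE_RW    0x002u  /* hypothetical */
    #define _PAGE_DIRTY 0x040u  /* hypothetical */

    static int pte_shstk_like(uint64_t flags)
    {
            /* shadow stack: dirty but not writable */
            return (flags & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
    }

    int main(void)
    {
            assert(pte_shstk_like(_PAGE_DIRTY));               /* dirty, not writable */
            assert(!pte_shstk_like(_PAGE_RW | _PAGE_DIRTY));   /* ordinary dirty page */
            assert(!pte_shstk_like(0));
            return 0;
    }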
|
/linux/arch/loongarch/include/asm/
pgtable.h:
    113: pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
    261: #define pte_pfn(x) ((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
    294: { pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }   (in mk_swap_pte())
    299: #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
    304: static inline bool pte_swp_exclusive(pte_t pte)
    306: return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
    309: static inline pte_t pte_swp_mkexclusive(pte_t pte)
    311: pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
    312: return pte;
    315: static inline pte_t pte_swp_clear_exclusive(pte_t pte)
    [all …]
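
The constructor on line 294 packs a swapped-out page into a non-present PTE: a 7-bit swap type at bit 16 and the swap offset from bit 24 upward, leaving the low bits clear. The decode helpers below are written to mirror that packing; they are illustrative, not the LoongArch macros:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t mk_swap_pte(unsigned int type, uint64_t offset)
    {
            return ((uint64_t)(type & 0x7f) << 16) | (offset << 24);
    }

    static unsigned int swp_type(uint64_t pteval)
    {
            return (pteval >> 16) & 0x7f;
    }

    static uint64_t swp_offset(uint64_t pteval)
    {
            return pteval >> 24;
    }

    int main(void)
    {
            uint64_t pte = mk_swap_pte(5, 0x1234);

            assert(swp_type(pte) == 5);
            assert(swp_offset(pte) == 0x1234);
            assert((pte & 0xffff) == 0);   /* low bits stay clear for a swap entry */
            return 0;
    }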
|
/linux/arch/xtensa/include/asm/
pgtable.h:
    32: * One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
    42: * PTE tables (page table entry), ie. 1st-level page tables:
    43: * One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
    85: * For the Xtensa architecture, the PTE layout is as follows:
    144: /* We use invalid attribute values to distinguish special pte entries */
    208: printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
    225: * The pmd contains the kernel virtual address of the pte page.
    232: * pte status.
    234: # define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
    236: # define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
    [all …]
|
/linux/arch/riscv/include/asm/
pgtable.h:
    299: static inline unsigned long pte_napot(pte_t pte)
    301: return pte_val(pte) & _PAGE_NAPOT;
    304: static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
    310: return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
    317: static inline unsigned long pte_napot(pte_t pte)
    325: static inline unsigned long pte_pfn(pte_t pte)
    327: unsigned long res = __page_val_to_pfn(pte_val(pte));
    329: if (has_svnapot() && pte_napot(pte))
    348: static inline pgprot_t pte_pgprot(pte_t pte)
    350: unsigned long pfn = pte_pfn(pte);
    [all …]
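
pte_mknapot() and the NAPOT-aware pte_pfn() above belong to RISC-V's Svnapot support, where one PTE can describe a naturally aligned power-of-two range of pages by repurposing the low bits of the PFN field and flagging the entry. The sketch below models that encode/decode shape under stated assumptions (clear the low order bits of the PFN, set bit order-1 inside it as the size marker, set a NAPOT flag); shifts, flag values, and the decode helper are hypothetical, not the real RISC-V definitions:

    #include <assert.h>
    #include <stdint.h>

    #define PFN_SHIFT  10                /* hypothetical position of the PFN field */
    #define PAGE_NAPOT (1ull << 63)      /* hypothetical NAPOT flag */

    static uint64_t mknapot(uint64_t pteval, unsigned int order)
    {
            unsigned int pos = order - 1 + PFN_SHIFT;
            uint64_t napot_bit = 1ull << pos;
            uint64_t napot_mask = ~(((1ull << order) - 1) << PFN_SHIFT);

            return (pteval & napot_mask) | napot_bit | PAGE_NAPOT;
    }

    static uint64_t napot_pfn(uint64_t pteval, unsigned int order)
    {
            /* strip the flag, then the size-marker bits, to recover the base PFN */
            uint64_t pfn = (pteval & ~PAGE_NAPOT) >> PFN_SHIFT;

            return pfn & ~((1ull << order) - 1);
    }

    int main(void)
    {
            unsigned int order = 4;                   /* a 16-page range */
            uint64_t pte = 0x1230ull << PFN_SHIFT;    /* base PFN aligned to 16 */
            uint64_t napot = mknapot(pte, order);

            assert(napot & PAGE_NAPOT);
            assert(napot_pfn(napot, order) == 0x1230);
            return 0;
    }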
|
/linux/arch/alpha/include/asm/
pgtable.h:
    169: #define pte_pfn(pte) (pte_val(pte) >> PFN_PTE_SHIFT)
    171: #define pte_page(pte) pfn_to_page(pte_pfn(pte))
    174: { pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }   (in pfn_pte())
    176: extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
    177: { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
    201: extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
    202: extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
    222: extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); }
    223: extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
    224: extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
    [all …]
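
Two details stand out in the Alpha excerpt: the page frame number lives in the upper 32 bits of the 64-bit PTE (pfn_pte() shifts it up, pte_pfn() shifts it back down), and writability is expressed negatively, since _PAGE_FOW is a fault-on-write bit that must be clear for a writable page. A sketch of both, omitting PHYS_TWIDDLE and using a hypothetical bit value:

    #include <assert.h>
    #include <stdint.h>

    #define PFN_PTE_SHIFT 32
    #define _PAGE_FOW     0x4u   /* hypothetical: fault on write */

    static uint64_t pfn_pte(uint64_t pfn, uint64_t prot)
    {
            return (pfn << PFN_PTE_SHIFT) | prot;
    }

    static uint64_t pte_pfn(uint64_t pteval)
    {
            return pteval >> PFN_PTE_SHIFT;
    }

    static int pte_write(uint64_t pteval)
    {
            return !(pteval & _PAGE_FOW);   /* writable when fault-on-write is clear */
    }

    int main(void)
    {
            uint64_t rw = pfn_pte(0xabcd, 0);
            uint64_t ro = pfn_pte(0xabcd, _PAGE_FOW);

            assert(pte_pfn(rw) == 0xabcd && pte_pfn(ro) == 0xabcd);
            assert(pte_write(rw) && !pte_write(ro));
            return 0;
    }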
|