
Searched full:pte (Results 1 – 25 of 662) sorted by relevance


/linux/arch/mips/include/asm/
pgtable.h
45 extern void __update_cache(unsigned long address, pte_t pte);
111 # define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL)) argument
113 # define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) argument
116 #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) argument
117 #define pte_no_exec(pte) ((pte).pte_low & _PAGE_NO_EXEC) argument
119 static inline void set_pte(pte_t *ptep, pte_t pte) argument
121 ptep->pte_high = pte.pte_high;
123 ptep->pte_low = pte.pte_low;
126 if (pte.pte_high & _PAGE_GLOBAL) {
128 if (pte.pte_low & _PAGE_GLOBAL) {
[all …]
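
The 32-bit MIPS variant above splits each 64-bit PTE into pte_low/pte_high words, and set_pte() stores the two halves separately. Below is a minimal userspace sketch of that split-store pattern; the struct mirrors the excerpt, while the bit value and the ordering comment are assumptions for illustration, not the real MIPS definitions.

#include <stdint.h>
#include <stdio.h>

#define _PAGE_GLOBAL 0x1u /* hypothetical bit position for this sketch */

typedef struct { uint32_t pte_low, pte_high; } pte_t;

static void set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        /* the real kernel orders the two stores so a concurrent
           walker never pairs a new low word with a stale high word */
        ptep->pte_low = pte.pte_low;
}

int main(void)
{
        pte_t slot = { 0, 0 };
        pte_t pte = { .pte_low = 0x1000u | _PAGE_GLOBAL, .pte_high = 0x2u };

        set_pte(&slot, pte);
        printf("low=%#x high=%#x\n", slot.pte_low, slot.pte_high);
        return 0;
}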
/linux/arch/m68k/include/asm/
mcf_pgtable.h
10 * after masking from the pte.
99 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) in pte_modify() argument
101 pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot); in pte_modify()
102 return pte; in pte_modify()
112 #define __pte_page(pte) ((void *) (pte_val(pte) & PAGE_MASK)) argument
115 static inline int pte_none(pte_t pte) in pte_none() argument
117 return !pte_val(pte); in pte_none()
120 static inline int pte_present(pte_t pte) in pte_present() argument
122 return pte_val(pte) & CF_PAGE_VALID; in pte_present()
131 #define pte_page(pte) virt_to_page(__pte_page(pte)) argument
[all …]
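
The ColdFire pte_modify() above shows the pattern used by nearly every architecture in these results: keep the page frame and state bits selected by a change mask, then OR in the new protection. A minimal sketch, assuming hypothetical mask and protection values:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pte_t;
typedef uint32_t pgprot_t;

#define PAGE_MASK        0xfffff000u
#define CF_PAGE_CHG_MASK (PAGE_MASK | 0x30u) /* hypothetical: frame + dirty/accessed */

static pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        /* keep the bits named by the change mask, replace the rest */
        return (pte & CF_PAGE_CHG_MASK) | newprot;
}

int main(void)
{
        pte_t pte = 0x12345030u | 0x7u;  /* frame + state bits + old protection */
        pte = pte_modify(pte, 0x5u);     /* new protection; frame and state survive */
        printf("pte=%#x\n", pte);
        return 0;
}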
sun3_pgtable.h
29 /* Page protection values within PTE. */
79 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) in pte_modify() argument
81 pte_val(pte) = (pte_val(pte) & SUN3_PAGE_CHG_MASK) | pgprot_val(newprot); in pte_modify()
82 return pte; in pte_modify()
87 #define __pte_page(pte) \ argument
88 (__va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
95 static inline int pte_none (pte_t pte) { return !pte_val (pte); } in pte_none() argument
96 static inline int pte_present (pte_t pte) { return pte_val (pte) & SUN3_PAGE_VALID; } in pte_present() argument
103 #define pte_pfn(pte) (pte_val(pte) & SUN3_PAGE_PGNUM_MASK) argument
107 #define pte_page(pte) virt_to_page(__pte_page(pte)) argument
[all …]
motorola_pgtable.h
84 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) in pte_modify() argument
86 pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); in pte_modify()
87 return pte; in pte_modify()
100 #define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK)) argument
105 #define pte_none(pte) (!pte_val(pte)) argument
106 #define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)) argument
110 #define pte_page(pte) virt_to_page(__va(pte_val(pte))) argument
111 #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) argument
135 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
146 static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); } in pte_write() argument
[all …]
/linux/arch/hexagon/include/asm/
pgtable.h
21 * The PTE model described here is that of the Hexagon Virtual Machine,
30 * To maximize the comfort level for the PTE manipulation macros,
39 * We have a total of 4 "soft" bits available in the abstract PTE.
43 * the PTE describes MMU programming or swap space.
99 /* Any bigger and the PTE disappears. */
136 #define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE) argument
143 extern void sync_icache_dcache(pte_t pte);
145 #define pte_present_exec_user(pte) \ argument
146 ((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
160 * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
[all …]
/linux/arch/powerpc/include/asm/nohash/
pgtable.h
35 static inline unsigned long pte_huge_size(pte_t pte) in pte_huge_size() argument
42 * PTE updates. This function is called whenever an existing
43 * valid PTE is updated. This does -not- include set_pte_at()
44 * which nowadays only sets a new PTE.
47 * and the PTE may be either 32 or 64 bit wide. In the latter case,
48 * when using atomic updates, only the low part of the PTE is
134 /* Set the dirty and/or accessed bits atomically in a linux PTE */
151 /* Generic accessors to PTE bits */
153 static inline pte_t pte_mkwrite_novma(pte_t pte) in pte_mkwrite_novma() argument
158 return __pte(pte_val(pte) | _PAGE_RW); in pte_mkwrite_novma()
[all …]
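
The comment in this entry describes atomic PTE updates: read-modify-write a live PTE so concurrent walkers never see a torn or intermediate value. Below is a minimal sketch of that idea using standard C11 atomics as a stand-in for the kernel's primitives; the bit name is hypothetical.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_ACCESSED (1u << 5) /* hypothetical */

static uint32_t pte_update(_Atomic uint32_t *ptep, uint32_t clr, uint32_t set)
{
        uint32_t old = atomic_load(ptep);

        /* retry until no other updater raced with us; the new value is
           recomputed from the refreshed old value on each failure */
        while (!atomic_compare_exchange_weak(ptep, &old, (old & ~clr) | set))
                ;
        return old; /* callers often need the pre-update value */
}

int main(void)
{
        _Atomic uint32_t pte = 0x1000u;

        pte_update(&pte, 0, _PAGE_ACCESSED);
        printf("pte=%#x\n", (unsigned)atomic_load(&pte));
        return 0;
}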
/linux/arch/arm/include/asm/
pgtable.h
60 #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte) argument
167 #define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT) argument
170 #define pte_page(pte) pfn_to_page(pte_pfn(pte)) argument
174 #define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \ argument
175 : !!(pte_val(pte) & (val)))
176 #define pte_isclear(pte, val) (!(pte_val(pte) & (val))) argument
178 #define pte_none(pte) (!pte_val(pte)) argument
179 #define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT)) argument
180 #define pte_valid(pte) (pte_isset((pte), L_PTE_VALID)) argument
181 #define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) argument
[all …]
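
The pte_isset() macro above carries a subtle trick: when the tested flag constant fits in 32 bits, the raw AND result is already a usable truth value, but a flag in the high half of a 64-bit (LPAE) PTE would be truncated to zero by callers storing the result in an int, so it is normalized with !!. A self-contained sketch, with a hypothetical high flag:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;

#define pte_val(p) (p)
#define pte_isset(pte, val)                                  \
        ((uint32_t)(val) == (val) ? pte_val(pte) & (val)     \
                                  : !!(pte_val(pte) & (val)))

#define L_PTE_PRESENT (1ull << 0)   /* low flag: raw AND result is fine */
#define L_PTE_HIGH    (1ull << 40)  /* hypothetical high flag: needs !! */

int main(void)
{
        pte_t pte = L_PTE_PRESENT | L_PTE_HIGH;
        int present = pte_isset(pte, L_PTE_PRESENT);
        int high = pte_isset(pte, L_PTE_HIGH); /* 1, not a truncated 0 */

        printf("present=%d high=%d\n", present, high);
        return 0;
}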
/linux/arch/arm64/include/asm/
pgtable.h
46 * These barriers are emitted under certain conditions after a pte entry in emit_pte_barriers()
51 * setting the pte to valid won't cause a spurious fault. If the thread in emit_pte_barriers()
148 pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
151 static inline phys_addr_t __pte_to_phys(pte_t pte) in __pte_to_phys() argument
153 pte_val(pte) &= ~PTE_MAYBE_SHARED; in __pte_to_phys()
154 return (pte_val(pte) & PTE_ADDR_LOW) | in __pte_to_phys()
155 ((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT); in __pte_to_phys()
162 static inline phys_addr_t __pte_to_phys(pte_t pte) in __pte_to_phys() argument
164 return pte_val(pte) & PTE_ADDR_LOW; in __pte_to_phys()
173 #define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT) argument
[all …]
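
The first __pte_to_phys() variant above reassembles a physical address whose top bits do not fit in the PTE's low address field, so they are stored elsewhere in the entry and shifted back into place. The sketch below models that reassembly with hypothetical field positions; the real arm64 layout differs.

#include <stdint.h>
#include <stdio.h>

#define PTE_ADDR_LOW        0x0000fffffffff000ull /* hypothetical PA[47:12] */
#define PTE_ADDR_HIGH       0x0000000000000f00ull /* hypothetical PA[51:48], stored low */
#define PTE_ADDR_HIGH_SHIFT 40                    /* moves bits 8..11 up to 48..51 */

static uint64_t pte_to_phys(uint64_t pte)
{
        return (pte & PTE_ADDR_LOW) |
               ((pte & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}

int main(void)
{
        uint64_t pte = 0x0000123456789000ull | 0x300ull; /* low PA + high bits */

        printf("phys=%#llx\n", (unsigned long long)pte_to_phys(pte));
        return 0;
}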
/linux/arch/csky/include/asm/
pgtable.h
27 pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
36 #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) argument
37 #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) argument
42 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) argument
85 static inline void set_pte(pte_t *p, pte_t pte) in set_pte() argument
87 *p = pte; in set_pte()
141 static inline int pte_read(pte_t pte) in pte_read() argument
143 return pte.pte_low & _PAGE_READ; in pte_read()
146 static inline int pte_write(pte_t pte) in pte_write() argument
148 return (pte).pte_low & _PAGE_WRITE; in pte_write()
[all …]
/linux/arch/um/include/asm/
pgtable.h
111 static inline int pte_none(pte_t pte) in pte_none() argument
113 return pte_is_zero(pte); in pte_none()
120 static inline int pte_read(pte_t pte) in pte_read() argument
122 return((pte_get_bits(pte, _PAGE_USER)) && in pte_read()
123 !(pte_get_bits(pte, _PAGE_PROTNONE))); in pte_read()
126 static inline int pte_exec(pte_t pte){ in pte_exec() argument
127 return((pte_get_bits(pte, _PAGE_USER)) && in pte_exec()
128 !(pte_get_bits(pte, _PAGE_PROTNONE))); in pte_exec()
131 static inline int pte_write(pte_t pte) in pte_write() argument
133 return((pte_get_bits(pte, _PAGE_RW)) && in pte_write()
[all …]
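
The UML accessors above encode PROT_NONE as "flags kept, access removed": a page counts as readable only if its user bit is set and its protnone bit is clear. A minimal sketch with hypothetical bit positions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_USER     (1u << 2) /* hypothetical */
#define _PAGE_PROTNONE (1u << 8) /* hypothetical */

typedef uint32_t pte_t;

static bool pte_read(pte_t pte)
{
        return (pte & _PAGE_USER) && !(pte & _PAGE_PROTNONE);
}

int main(void)
{
        printf("%d\n", pte_read(_PAGE_USER));                  /* 1 */
        printf("%d\n", pte_read(_PAGE_USER | _PAGE_PROTNONE)); /* 0: PROT_NONE */
        return 0;
}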
/linux/arch/openrisc/include/asm/
pgtable.h
44 /* Certain architectures need to do special things when pte's
102 * An OR32 PTE looks like this:
117 * PTE as per above
120 #define _PAGE_CC 0x001 /* software: pte contains a translation */
200 static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; } in pte_read() argument
201 static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } in pte_write() argument
202 static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } in pte_exec() argument
203 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } in pte_dirty() argument
204 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } in pte_young() argument
206 static inline pte_t pte_wrprotect(pte_t pte) in pte_wrprotect() argument
[all …]
/linux/arch/microblaze/include/asm/
pgtable.h
84 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
107 printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
114 * Bits in a linux-style PTE. These match the bits in the
115 * (hardware-defined) PTE as closely as possible.
124 * Where possible we make the Linux PTE bits match up with this
137 * - All other bits of the PTE are loaded into TLBLO without
139 * software PTE bits. We actually use bits 21, 24, 25, and
146 #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
180 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
181 * to have it in the Linux PTE, and in fact the bit could be reused for
[all …]
/linux/arch/nios2/include/asm/
pgtable.h
87 static inline int pte_write(pte_t pte) \ in pte_write() argument
88 { return pte_val(pte) & _PAGE_WRITE; } in pte_write()
89 static inline int pte_dirty(pte_t pte) \ in pte_dirty() argument
90 { return pte_val(pte) & _PAGE_DIRTY; } in pte_dirty()
91 static inline int pte_young(pte_t pte) \ in pte_young() argument
92 { return pte_val(pte) & _PAGE_ACCESSED; } in pte_young()
105 static inline int pte_none(pte_t pte) in pte_none() argument
107 return !(pte_val(pte) & ~(_PAGE_GLOBAL|0xf)); in pte_none()
110 static inline int pte_present(pte_t pte) \ in pte_present() argument
111 { return pte_val(pte) & _PAGE_PRESENT; } in pte_present()
[all …]
/linux/drivers/iommu/intel/
pasid.c
243 struct pasid_entry *pte; in intel_pasid_tear_down_entry() local
247 pte = intel_pasid_get_entry(dev, pasid); in intel_pasid_tear_down_entry()
248 if (WARN_ON(!pte)) { in intel_pasid_tear_down_entry()
253 if (!pasid_pte_is_present(pte)) { in intel_pasid_tear_down_entry()
254 if (!pasid_pte_is_fault_disabled(pte)) { in intel_pasid_tear_down_entry()
255 WARN_ON(READ_ONCE(pte->val[0]) != 0); in intel_pasid_tear_down_entry()
266 pasid_clear_entry(pte); in intel_pasid_tear_down_entry()
273 did = pasid_get_domain_id(pte); in intel_pasid_tear_down_entry()
274 pgtt = pasid_pte_get_pgtt(pte); in intel_pasid_tear_down_entry()
279 clflush_cache_range(pte, sizeof(*pte)); in intel_pasid_tear_down_entry()
[all …]
/linux/drivers/iommu/amd/
io_pgtable_v2.c
44 static inline bool is_large_pte(u64 pte) in is_large_pte() argument
46 return (pte & IOMMU_PAGE_PSE); in is_large_pte()
59 static inline void *get_pgtable_pte(u64 pte) in get_pgtable_pte() argument
61 return iommu_phys_to_virt(pte & PM_ADDR_MASK); in get_pgtable_pte()
66 u64 pte; in set_pte_attr() local
68 pte = __sme_set(paddr & PM_ADDR_MASK); in set_pte_attr()
69 pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER; in set_pte_attr()
70 pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY; in set_pte_attr()
73 pte |= IOMMU_PAGE_RW; in set_pte_attr()
77 pte |= IOMMU_PAGE_PSE; in set_pte_attr()
[all …]
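
set_pte_attr() in this entry builds an IOMMU PTE by masking in the physical address and OR-ing permission bits, plus a PSE bit when the entry maps a large page. A sketch of that construction with hypothetical bit values, not the real AMD v2 format:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PM_ADDR_MASK       0x000ffffffffff000ull
#define IOMMU_PAGE_PRESENT (1ull << 0)
#define IOMMU_PAGE_RW      (1ull << 1)
#define IOMMU_PAGE_USER    (1ull << 2)
#define IOMMU_PAGE_ACCESS  (1ull << 5)
#define IOMMU_PAGE_DIRTY   (1ull << 6)
#define IOMMU_PAGE_PSE     (1ull << 7)

static uint64_t set_pte_attr(uint64_t paddr, bool writable, bool large)
{
        uint64_t pte = paddr & PM_ADDR_MASK;

        pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
        pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
        if (writable)
                pte |= IOMMU_PAGE_RW;
        if (large)
                pte |= IOMMU_PAGE_PSE;
        return pte;
}

int main(void)
{
        printf("pte=%#llx\n",
               (unsigned long long)set_pte_attr(0x40000000ull, true, false));
        return 0;
}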
/linux/arch/powerpc/include/asm/book3s/32/
pgtable.h
21 #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
22 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
35 /* We never clear the high word of the pte */
53 * Location of the PFN in the PTE. Most 32-bit platforms use the same
111 /* Bits to mask out from a PMD to get to the PTE page */
120 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
125 * level has 2048 entries and the second level has 512 64-bit PTE entries.
212 * Bits in a linux-style PTE. These match the bits in the
213 * (hardware-defined) PowerPC PTE as closely as possible.
250 * PTE updates. This function is called whenever an existing
[all …]
/linux/arch/powerpc/mm/
pgtable.c
29 #include <asm/pte-walk.h>
45 * reasonably "normal" PTEs. We currently require a PTE to be present
46 * and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
49 static inline int pte_looks_normal(pte_t pte, unsigned long addr) in pte_looks_normal() argument
52 if (pte_present(pte) && !pte_special(pte)) { in pte_looks_normal()
53 if (pte_ci(pte)) in pte_looks_normal()
61 static struct folio *maybe_pte_to_folio(pte_t pte) in maybe_pte_to_folio() argument
63 unsigned long pfn = pte_pfn(pte); in maybe_pte_to_folio()
82 static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) in set_pte_filter_hash() argument
84 pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); in set_pte_filter_hash()
[all …]
/linux/arch/parisc/include/asm/
pgtable.h
64 extern void __update_cache(pte_t pte);
79 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
104 * done to get usable bits out of the PTE) */
182 /* this defines the shift to the usable bits in the PTE it is set so
187 /* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
315 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } in pte_dirty() argument
316 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } in pte_young() argument
317 static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } in pte_write() argument
318 static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } in pte_special() argument
320 static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } in pte_mkclean() argument
[all …]
/linux/arch/riscv/mm/
hugetlbpage.c
18 pte_t pte = ptep_get(ptep); in huge_ptep_get() local
20 if (pte_dirty(pte)) in huge_ptep_get()
23 if (pte_young(pte)) in huge_ptep_get()
36 pte_t *pte = NULL; in huge_pte_alloc() local
52 pte = (pte_t *)pud; in huge_pte_alloc()
58 pte = huge_pmd_share(mm, vma, addr, pud); in huge_pte_alloc()
60 pte = (pte_t *)pmd_alloc(mm, pud, addr); in huge_pte_alloc()
70 pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order)); in huge_pte_alloc()
76 if (pte) { in huge_pte_alloc()
77 pte_t pteval = ptep_get_lockless(pte); in huge_pte_alloc()
[all …]
/linux/arch/x86/include/asm/
pgtable.h
68 #define set_pte(ptep, pte) native_set_pte(ptep, pte) argument
70 #define set_pte_atomic(ptep, pte) \ argument
71 native_set_pte_atomic(ptep, pte)
155 static inline bool pte_dirty(pte_t pte) in pte_dirty() argument
157 return pte_flags(pte) & _PAGE_DIRTY_BITS; in pte_dirty()
160 static inline bool pte_shstk(pte_t pte) in pte_shstk() argument
163 (pte_flags(pte) & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY; in pte_shstk()
166 static inline int pte_young(pte_t pte) in pte_young() argument
168 return pte_flags(pte) & _PAGE_ACCESSED; in pte_young()
171 static inline bool pte_decrypted(pte_t pte) in pte_decrypted() argument
[all …]
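
pte_shstk() above relies on x86's shadow-stack encoding: Dirty=1 with Write=0, a combination that ordinary mappings never use. A simplified sketch (the real helper also checks that the CPU supports shadow stacks):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_RW    (1ull << 1)
#define _PAGE_DIRTY (1ull << 6)

static bool pte_shstk(uint64_t pte_flags)
{
        /* dirty but not writable marks a shadow-stack page */
        return (pte_flags & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
}

int main(void)
{
        printf("data page:  %d\n", pte_shstk(_PAGE_RW | _PAGE_DIRTY)); /* 0 */
        printf("shstk page: %d\n", pte_shstk(_PAGE_DIRTY));            /* 1 */
        return 0;
}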
/linux/arch/xtensa/include/asm/
pgtable.h
32 * One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
42 * PTE tables (page table entry), ie. 1st-level page tables:
43 * One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
85 * For the Xtensa architecture, the PTE layout is as follows:
144 /* We use invalid attribute values to distinguish special pte entries */
208 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
225 * The pmd contains the kernel virtual address of the pte page.
232 * pte status.
234 # define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER)) argument
236 # define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) argument
[all …]
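
This entry describes a classic two-level layout: one 4 kB page of 1024 PGD pointers, each pointing to a 1024-entry PTE page. The sketch below splits a virtual address along that 10/10/12 layout; the walk is a simplified model, not the Xtensa code.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PTE 1024
#define PGDIR_SHIFT  (PAGE_SHIFT + 10) /* 22: each PGD entry covers 4 MB */

static unsigned int pgd_index(uint32_t va) { return va >> PGDIR_SHIFT; }

static unsigned int pte_index(uint32_t va)
{
        return (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
        uint32_t va = 0x12345678u;

        printf("pgd=%u pte=%u offset=%#x\n",
               pgd_index(va), pte_index(va), va & ((1u << PAGE_SHIFT) - 1));
        return 0;
}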
/linux/arch/loongarch/include/asm/
pgtable.h
113 pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
261 #define pte_pfn(x) ((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
294 { pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; } in mk_swap_pte() local
299 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) argument
304 static inline bool pte_swp_exclusive(pte_t pte) in pte_swp_exclusive() argument
306 return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; in pte_swp_exclusive()
309 static inline pte_t pte_swp_mkexclusive(pte_t pte) in pte_swp_mkexclusive() argument
311 pte_val(pte) |= _PAGE_SWP_EXCLUSIVE; in pte_swp_mkexclusive()
312 return pte; in pte_swp_mkexclusive()
315 static inline pte_t pte_swp_clear_exclusive(pte_t pte) in pte_swp_clear_exclusive() argument
[all …]
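
mk_swap_pte() above packs a 7-bit swap type at bit 16 and the swap offset from bit 24, keeping the low bits clear so a swap entry can never be mistaken for a present mapping. A sketch of the encoding, with hypothetical decode helpers written as inverses of the excerpt's formula:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;

static pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        return ((pte_t)(type & 0x7f) << 16) | ((pte_t)offset << 24);
}

static unsigned long swp_type(pte_t pte)   { return (pte >> 16) & 0x7f; }
static unsigned long swp_offset(pte_t pte) { return (unsigned long)(pte >> 24); }

int main(void)
{
        pte_t pte = mk_swap_pte(3, 0x1234);

        printf("type=%lu offset=%#lx\n", swp_type(pte), swp_offset(pte));
        return 0;
}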
/linux/arch/alpha/include/asm/
pgtable.h
169 #define pte_pfn(pte) (pte_val(pte) >> PFN_PTE_SHIFT) argument
171 #define pte_page(pte) pfn_to_page(pte_pfn(pte)) argument
174 { pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; } in pfn_pte() local
176 extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) in pte_modify() argument
177 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } in pte_modify()
201 extern inline int pte_none(pte_t pte) { return !pte_val(pte); } in pte_none() argument
202 extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; } in pte_present() argument
222 extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); } in pte_write() argument
223 extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } in pte_dirty() argument
224 extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } in pte_young() argument
[all …]
/linux/arch/powerpc/include/asm/book3s/64/
pgtable.h
42 #define _PAGE_PRESENT 0x8000000000000000UL /* pte contains a translation */
44 * We need to mark a pmd pte invalid while splitting. We can do that by clearing
45 * the _PAGE_PRESENT bit. But then that will be taken as a swap pte. In order to
48 * We do that temporary invalidate for regular pte entry in ptep_set_access_flags
93 * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
95 * maps CI pte mapping.
99 * We support _RPAGE_PA_MAX bit real address in pte. On the linux side
163 * Because of use of pte fragments and THP, size of page table
220 /* Bits to mask out from a PMD to get to the PTE page */
389 static inline int pte_write(pte_t pte) in pte_write() argument
[all …]
/linux/arch/powerpc/kvm/
trace_pr.h
64 TP_PROTO(struct hpte_cache *pte),
65 TP_ARGS(pte),
77 __entry->host_vpn = pte->host_vpn;
78 __entry->pfn = pte->pfn;
79 __entry->eaddr = pte->pte.eaddr;
80 __entry->vpage = pte->pte.vpage;
81 __entry->raddr = pte->pte.raddr;
82 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
83 (pte->pte.may_write ? 0x2 : 0) |
84 (pte->pte.may_execute ? 0x1 : 0);
[all …]
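
The trace assignment above folds three access booleans into one rwx bitfield for the trace record. A tiny sketch of that packing:

#include <stdbool.h>
#include <stdio.h>

struct hpte_cache { bool may_read, may_write, may_execute; };

static unsigned int pack_flags(const struct hpte_cache *pte)
{
        return (pte->may_read ? 0x4 : 0) |
               (pte->may_write ? 0x2 : 0) |
               (pte->may_execute ? 0x1 : 0);
}

int main(void)
{
        struct hpte_cache pte = { .may_read = true, .may_execute = true };

        printf("flags=%#x\n", pack_flags(&pte)); /* 0x5 */
        return 0;
}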
