Lines Matching +full:entry +full:- +full:address
1 /* SPDX-License-Identifier: GPL-2.0 */
9 * Derived from "include/asm-i386/pgtable.h"
18 #include <linux/page-flags.h>
19 #include <linux/radix-tree.h>
50 #define update_mmu_cache(vma, address, ptep) do { } while (0) argument
52 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0) argument
56	 * for zero-mapped memory areas etc.
90 #define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN)
111 #define KMSAN_VMALLOC_SIZE (VMALLOC_END - VMALLOC_START)
131	 * A 64-bit page table entry of S390 has the following format:
136 * I Page-Invalid Bit: Page is not available for address-translation
137 * P Page-Protection Bit: Store access not possible for page
138 * C Change-bit override: HW is not required to set change bit
140	 * A 64-bit segment table entry of S390 has the following format:
141 * | P-table origin | TT
145 * I Segment-Invalid Bit: Segment is not available for address-translation
146 * C Common-Segment Bit: Segment is not private (PoP 3-30)
147 * P Page-Protection Bit: Store access not possible for page
150	 * A 64-bit region table entry of S390 has the following format:
151 * | S-table origin | TF TTTL
155	 * I Region-Invalid Bit:   Region is not available for address-translation
165 * X Space-Switch event:
166 * G Segment-Invalid Bit:
167 * P Private-Space Bit:
168 * S Storage-Alteration:
170 * TL Table-Length:
181 /* Hardware bits in the page table entry */
182 #define _PAGE_NOEXEC 0x100 /* HW no-execute bit */
183 #define _PAGE_PROTECT 0x200 /* HW read-only bit */
187 /* Software bits in the page table entry */
219 * distinguish present from not-present ptes. It is changed only with the page
232 * prot-none, clean, old .11.xx0000.1
233 * prot-none, clean, young .11.xx0001.1
234 * prot-none, dirty, old .11.xx0010.1
235 * prot-none, dirty, young .11.xx0011.1
236 * read-only, clean, old .11.xx0100.1
237 * read-only, clean, young .01.xx0101.1
238 * read-only, dirty, old .11.xx0110.1
239 * read-only, dirty, young .01.xx0111.1
240 * read-write, clean, old .11.xx1100.1
241 * read-write, clean, young .01.xx1101.1
242 * read-write, dirty, old .10.xx1110.1
243 * read-write, dirty, young .00.xx1111.1
244 * HW-bits: R read-only, I invalid
245 * SW-bits: p present, y young, d dirty, r read, w write, s special,
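As a hedged illustration of the encoding table above, the hardware view of a raw pte value can be decoded from the HW bits alone. _PAGE_PROTECT and _PAGE_NOEXEC are taken from the matched lines; _PAGE_INVALID (the "I" column) is not among them and is assumed here.

/*
 * Minimal sketch, not part of the header: decode only the hardware
 * state of a raw pte value. _PAGE_INVALID is an assumed name for the
 * HW invalid bit described in the comment above.
 */
static inline bool pte_hw_invalid(unsigned long pteval)
{
	return pteval & _PAGE_INVALID;		/* I bit: no address translation */
}

static inline bool pte_hw_write_protected(unsigned long pteval)
{
	return pteval & _PAGE_PROTECT;		/* P bit: store access not possible */
}

static inline bool pte_hw_noexec(unsigned long pteval)
{
	return pteval & _PAGE_NOEXEC;		/* no-execute bit */
}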
253 /* Bits in the segment/region table address-space-control-element */
266 /* Bits in the region table entry */
269 #define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */
271 #define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
288 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
291 #define _REGION3_ENTRY_COMM 0x0010 /* Common-Region, marks swap entry */
292 #define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */
305 * SW region present bit. For non-leaf region-third-table entries, bits 62-63
312 /* Bits in the segment table entry */
316 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
319 #define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */
320 #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
329 #define _SEGMENT_ENTRY_COMM 0x0010 /* Common-Segment, marks swap entry */
330 #define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
343 #define _RST_ENTRY_COMM 0x0010 /* Common-Region/Segment, marks swap entry */
344 #define _RST_ENTRY_INVALID 0x0020 /* invalid region/segment table entry */
368 #define _REGION1_MASK (~(_REGION1_SIZE - 1))
369 #define _REGION2_MASK (~(_REGION2_SIZE - 1))
370 #define _REGION3_MASK (~(_REGION3_SIZE - 1))
371 #define _SEGMENT_MASK (~(_SEGMENT_SIZE - 1))
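The *_MASK macros above follow the usual power-of-two pattern, so rounding an address down to the start of the covering region or segment is a single AND. A hedged sketch; the 1 MB / 2 GB sizes are the conventional s390 values and are assumptions here, since _SEGMENT_SIZE and _REGION3_SIZE are not among the matched lines.

/* Sketch: start addresses of the enclosing translation units. */
static inline unsigned long segment_start(unsigned long addr)
{
	return addr & _SEGMENT_MASK;	/* start of the enclosing segment (1 MB, assumed) */
}

static inline unsigned long region3_start(unsigned long addr)
{
	return addr & _REGION3_MASK;	/* start of the enclosing region (2 GB, assumed) */
}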
395 * Segment table and region3 table entry encoding
396 * (R = read-only, I = invalid, y = young bit):
398 * prot-none, clean, old 00..1...1...00
399 * prot-none, clean, young 01..1...1...00
400 * prot-none, dirty, old 10..1...1...00
401 * prot-none, dirty, young 11..1...1...00
402 * read-only, clean, old 00..1...1...01
403 * read-only, clean, young 01..1...0...01
404 * read-only, dirty, old 10..1...1...01
405 * read-only, dirty, young 11..1...0...01
406 * read-write, clean, old 00..1...1...11
407 * read-write, clean, young 01..1...0...11
408 * read-write, dirty, old 10..0...1...11
409 * read-write, dirty, young 11..0...0...11
411 * read-write, old segment table entries (origin!=0)
412 * HW-bits: R read-only, I invalid
413 * SW-bits: y young, d dirty, r read, w write
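A hedged sketch of how the listed segment-table macros classify a raw entry; only _SEGMENT_ENTRY_LARGE and _SEGMENT_ENTRY_INVALID from the lines above are used, and the helper name is illustrative.

/* Sketch: true if the raw entry is a valid large (segment-sized) mapping. */
static inline bool ste_is_valid_large(unsigned long ste)
{
	return (ste & _SEGMENT_ENTRY_LARGE) &&		/* STE-format control set */
	       !(ste & _SEGMENT_ENTRY_INVALID);		/* entry participates in translation */
}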
439 * A user page table pointer has the space-switch-event bit, the
440 * private-space-control bit and the storage-alteration-event-control
479 * Segment entry (large page) protection definitions.
525 * Region3 entry (large page) protection definitions.
553 return mm->context.asce_limit <= _REGION1_SIZE; in mm_p4d_folded()
559 return mm->context.asce_limit <= _REGION2_SIZE; in mm_pud_folded()
565 return mm->context.asce_limit <= _REGION3_SIZE; in mm_pmd_folded()
572 if (unlikely(mm->context.has_pgste)) in mm_has_pgste()
581 if (unlikely(atomic_read(&mm->context.protected_count))) in mm_is_protected()
636 if (!mm->context.allow_cow_sharing) in mm_forbids_zeropage()
645 if (mm->context.uses_skeys) in mm_uses_skeys()
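Taken together, the mm_*_folded() checks above determine how many translation levels a process address space actually uses, based on mm->context.asce_limit. A hedged sketch of that mapping; the helper itself is illustrative and not part of this header.

static inline int pgtable_levels_sketch(struct mm_struct *mm)
{
	if (!mm_p4d_folded(mm))
		return 5;	/* region-first .. page table */
	if (!mm_pud_folded(mm))
		return 4;	/* region-second .. page table */
	if (!mm_pmd_folded(mm))
		return 3;	/* region-third, segment, page table */
	return 2;		/* segment and page table only */
}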
654 unsigned long address = (unsigned long)ptr | 1; in csp() local
657 " csp %[r1],%[address]" in csp()
659 : [address] "d" (address) in csp()
664 * cspg() - Compare and Swap and Purge (CSPG)
674 unsigned long address = (unsigned long)ptr | 1; in cspg() local
677 " cspg %[r1],%[address]" in cspg()
679 : [address] "d" (address) in cspg()
691 * crdte() - Compare and Replace DAT Table Entry
696 * @address: The address mapped by the entry to be replaced
697 * @asce: The ASCE of this entry
703 unsigned long address, unsigned long asce) in crdte() argument
706 union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, }; in crdte()
993 * usable for kernel address space mappings where fault driven dirty and
1157 pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1); in __ptep_rdp()
1164 static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep, in __ptep_ipte() argument
1174 : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address), in __ptep_ipte()
1183 : [r2] "+a" (address), [r3] "+a" (opt) in __ptep_ipte()
1187 static __always_inline void __ptep_ipte_range(unsigned long address, int nr, in __ptep_ipte_range() argument
1196 : [r2] "+a" (address), [r3] "+a" (nr) in __ptep_ipte_range()
1223 pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte)); in ptep_test_and_clear_young()
1229 unsigned long address, pte_t *ptep) in ptep_clear_flush_young() argument
1231 return ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young()
1258 res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID)); in ptep_clear_flush()
1260 if (mm_is_protected(vma->vm_mm) && pte_present(res)) in ptep_clear_flush()
1331 unsigned long address, in flush_tlb_fix_spurious_fault() argument
1345 __ptep_rdp(address, ptep, 0, 0, 1); in flush_tlb_fix_spurious_fault()
1355 pte_t entry, int dirty) in ptep_set_access_flags() argument
1357 if (pte_same(*ptep, entry)) in ptep_set_access_flags()
1359 if (cpu_has_rdp() && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry)) in ptep_set_access_flags()
1360 ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry); in ptep_set_access_flags()
1362 ptep_xchg_direct(vma->vm_mm, addr, ptep, entry); in ptep_set_access_flags()
1370 pte_t *ptep, pte_t entry);
1383 bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
1414 pte_t *ptep, pte_t entry, unsigned int nr) in set_ptes() argument
1416 if (pte_present(entry)) in set_ptes()
1417 entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED)); in set_ptes()
1420 ptep_set_pte_at(mm, addr, ptep, entry); in set_ptes()
1421 if (--nr == 0) in set_ptes()
1424 entry = __pte(pte_val(entry) + PAGE_SIZE); in set_ptes()
1429 set_pte(ptep, entry); in set_ptes()
1430 if (--nr == 0) in set_ptes()
1433 entry = __pte(pte_val(entry) + PAGE_SIZE); in set_ptes()
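The set_ptes() loop above advances the pte value by PAGE_SIZE for each page, so the entry installed for the i-th page of a contiguous range is the first entry shifted by i pages. A hedged one-liner; the helper name is illustrative.

/* Sketch: pte that set_ptes() installs for page i of a contiguous range. */
static inline pte_t nth_contiguous_pte(pte_t first, unsigned int i)
{
	return __pte(pte_val(first) + (unsigned long)i * PAGE_SIZE);
}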
1440 * Conversion functions: convert a page and protection to a page entry,
1441 * and a page entry and page directory to the page they refer to.
1451 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) argument
1452 #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1)) argument
1453 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) argument
1454 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) argument
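A hedged sketch of the index macros in use, splitting one virtual address into its per-level table indices; pr_debug() and the function name are illustrative only.

static inline void dump_table_indices(unsigned long addr)
{
	pr_debug("addr=%lx pgd=%lu p4d=%lu pud=%lu pmd=%lu\n",
		 addr, pgd_index(addr), p4d_index(addr),
		 pud_index(addr), pmd_index(addr));
}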
1491 * The pgd_offset function *always* adds the index for the top-level
1494 * pgdp = pgd_offset(current->mm, addr);
1501 static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address) in pgd_offset_raw() argument
1506 /* Get the first entry of the top level table */ in pgd_offset_raw()
1508 /* Pick up the shift from the table type of the first entry */ in pgd_offset_raw()
1510 return pgd + ((address >> shift) & (PTRS_PER_PGD - 1)); in pgd_offset_raw()
1513 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address) argument
1515 static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address) in p4d_offset_lockless() argument
1518 return (p4d_t *) pgd_deref(pgd) + p4d_index(address); in p4d_offset_lockless()
1523 static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address) in p4d_offset() argument
1525 return p4d_offset_lockless(pgdp, *pgdp, address); in p4d_offset()
1528 static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address) in pud_offset_lockless() argument
1531 return (pud_t *) p4d_deref(p4d) + pud_index(address); in pud_offset_lockless()
1536 static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address) in pud_offset() argument
1538 return pud_offset_lockless(p4dp, *p4dp, address); in pud_offset()
1542 static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address) in pmd_offset_lockless() argument
1545 return (pmd_t *) pud_deref(pud) + pmd_index(address); in pmd_offset_lockless()
1550 static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address) in pmd_offset() argument
1552 return pmd_offset_lockless(pudp, *pudp, address); in pmd_offset()
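Chained together, the offset helpers above implement a software walk from the mm's top-level table down to the pte. A hedged sketch, assuming the generic pXd_none()/pXd_leaf() predicates and pte_offset_kernel() are available; large mappings and unmapped levels simply stop the walk.

static pte_t *walk_to_pte_sketch(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	if (pgd_none(*pgdp))
		return NULL;
	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(*p4dp))
		return NULL;
	pudp = pud_offset(p4dp, addr);
	if (pud_none(*pudp) || pud_leaf(*pudp))	/* unmapped or large pud */
		return NULL;
	pmdp = pmd_offset(pudp, addr);
	if (pmd_none(*pmdp) || pmd_leaf(*pmdp))	/* unmapped or large pmd */
		return NULL;
	return pte_offset_kernel(pmdp, addr);
}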
1563 return end <= current->mm->context.asce_limit; in gup_fast_permitted()
1637 * (see __Pxxx / __Sxxx). Convert to segment table entry format. in massage_pgprot_pmd()
1708 sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t); in __pmdp_idte()
1734 r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t); in __pudp_idte()
1771 pmd_t entry, int dirty) in pmdp_set_access_flags() argument
1775 entry = pmd_mkyoung(entry); in pmdp_set_access_flags()
1777 entry = pmd_mkdirty(entry); in pmdp_set_access_flags()
1778 if (pmd_val(*pmdp) == pmd_val(entry)) in pmdp_set_access_flags()
1780 pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry); in pmdp_set_access_flags()
1790 pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd)); in pmdp_test_and_clear_young()
1803 pmd_t *pmdp, pmd_t entry) in set_pmd_at() argument
1805 set_pmd(pmdp, entry); in set_pmd_at()
1832 return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); in pmdp_huge_get_and_clear_full()
1839 return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); in pmdp_huge_clear_flush()
1850 return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd); in pmdp_invalidate()
1864 unsigned long address, in pmdp_collapse_flush() argument
1867 return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_collapse_flush()
1886 * 64 bit swap entry format:
1887 * A page-table entry has some bits we have to treat in a special way.
1895 * Bits 0-51 store the offset.
1897 * Bits 57-61 store the type.
1902 #define __SWP_OFFSET_MASK ((1UL << 52) - 1)
1904 #define __SWP_TYPE_MASK ((1UL << 5) - 1)
1917 static inline unsigned long __swp_type(swp_entry_t entry) in __swp_type() argument
1919 return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK; in __swp_type()
1922 static inline unsigned long __swp_offset(swp_entry_t entry) in __swp_offset() argument
1924 return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK; in __swp_offset()
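A hedged round-trip sketch for the pte-level swap encoding above; __swp_entry() is the usual encoding counterpart to __swp_type()/__swp_offset() and is assumed here, since it is not among the matched lines. The caller is expected to pass values that fit __SWP_TYPE_MASK and __SWP_OFFSET_MASK.

/* Sketch: encode a (type, offset) pair and read it back. */
static inline void swp_roundtrip_check(unsigned long type, unsigned long offset)
{
	swp_entry_t entry = __swp_entry(type, offset);	/* assumed counterpart */

	WARN_ON(__swp_type(entry) != type);
	WARN_ON(__swp_offset(entry) != offset);
}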
1936 * 64 bit swap entry format for REGION3 and SEGMENT table entries (RSTE)
1937 * Bits 59 and 63 are used to indicate the swap entry. Bit 58 marks the rste
1939 * A swap entry is indicated by bit pattern (rste & 0x011) == 0x010
1944 * Bits 0-51 store the offset.
1945 * Bits 53-57 store the type.
1947 * Bits 60-61 (TT) indicate the table type: 0x01 for REGION3 and 0x00 for SEGMENT.
1951 #define __SWP_OFFSET_MASK_RSTE ((1UL << 52) - 1)
1953 #define __SWP_TYPE_MASK_RSTE ((1UL << 5) - 1)
1970 static inline unsigned long __swp_type_rste(swp_entry_t entry) in __swp_type_rste() argument
1972 return (entry.val >> __SWP_TYPE_SHIFT_RSTE) & __SWP_TYPE_MASK_RSTE; in __swp_type_rste()
1975 static inline unsigned long __swp_offset_rste(swp_entry_t entry) in __swp_offset_rste() argument
1977 return (entry.val >> __SWP_OFFSET_SHIFT_RSTE) & __SWP_OFFSET_MASK_RSTE; in __swp_offset_rste()
1997 ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))