Lines Matching +full:mm +full:- +full:0
1 // SPDX-License-Identifier: GPL-2.0
13 #include <linux/mm.h>
26 #include <asm/page-states.h>
40 static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, in ptep_ipte_local() argument
46 opt = 0; in ptep_ipte_local()
47 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_local()
48 if (asce == 0UL || nodat) in ptep_ipte_local()
50 if (asce != -1UL) { in ptep_ipte_local()
51 asce = asce ? : mm->context.asce; in ptep_ipte_local()
56 __ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL); in ptep_ipte_local()
60 static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr, in ptep_ipte_global() argument
66 opt = 0; in ptep_ipte_global()
67 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_global()
68 if (asce == 0UL || nodat) in ptep_ipte_global()
70 if (asce != -1UL) { in ptep_ipte_global()
71 asce = asce ? : mm->context.asce; in ptep_ipte_global()
76 __ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL); in ptep_ipte_global()
80 static inline pte_t ptep_flush_direct(struct mm_struct *mm, in ptep_flush_direct() argument
89 atomic_inc(&mm->context.flush_count); in ptep_flush_direct()
91 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in ptep_flush_direct()
92 ptep_ipte_local(mm, addr, ptep, nodat); in ptep_flush_direct()
94 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_flush_direct()
95 atomic_dec(&mm->context.flush_count); in ptep_flush_direct()
99 static inline pte_t ptep_flush_lazy(struct mm_struct *mm, in ptep_flush_lazy() argument
108 atomic_inc(&mm->context.flush_count); in ptep_flush_lazy()
109 if (cpumask_equal(&mm->context.cpu_attach_mask, in ptep_flush_lazy()
112 mm->context.flush_mm = 1; in ptep_flush_lazy()
114 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_flush_lazy()
115 atomic_dec(&mm->context.flush_count); in ptep_flush_lazy()
121 unsigned long pgste = 0; in pgste_get()
136 struct mm_struct *mm) in pgste_update_all() argument
141 if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID) in pgste_update_all()
157 struct mm_struct *mm) in pgste_set_key() argument
163 if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID) in pgste_set_key()
170 * The guest C/R information is still in the PGSTE, set real key C/R to 0. in pgste_set_key()
174 page_set_storage_key(address, nkey, 0); in pgste_set_key()
186 * Without enhanced suppression-on-protection, force the dirty bit on for all writable ptes. in pgste_set_pte()
193 /* This pte allows write access, set user-dirty */ in pgste_set_pte()
201 static inline pgste_t pgste_pte_notify(struct mm_struct *mm, in pgste_pte_notify() argument
211 ptep_notify(mm, addr, ptep, bits); in pgste_pte_notify()
217 static inline pgste_t ptep_xchg_start(struct mm_struct *mm, in ptep_xchg_start() argument
220 pgste_t pgste = __pgste(0); in ptep_xchg_start()
222 if (mm_has_pgste(mm)) { in ptep_xchg_start()
224 pgste = pgste_pte_notify(mm, addr, ptep, pgste); in ptep_xchg_start()
229 static inline pte_t ptep_xchg_commit(struct mm_struct *mm, in ptep_xchg_commit() argument
233 if (mm_has_pgste(mm)) { in ptep_xchg_commit()
235 pgste_set_key(ptep, pgste, new, mm); in ptep_xchg_commit()
237 pgste = pgste_update_all(old, pgste, mm); in ptep_xchg_commit()
250 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr, in ptep_xchg_direct() argument
258 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_xchg_direct()
260 old = ptep_flush_direct(mm, addr, ptep, nodat); in ptep_xchg_direct()
261 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new); in ptep_xchg_direct()
271 void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep, in ptep_reset_dat_prot() argument
275 atomic_inc(&mm->context.flush_count); in ptep_reset_dat_prot()
276 if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in ptep_reset_dat_prot()
277 __ptep_rdp(addr, ptep, 0, 0, 1); in ptep_reset_dat_prot()
279 __ptep_rdp(addr, ptep, 0, 0, 0); in ptep_reset_dat_prot()
287 atomic_dec(&mm->context.flush_count); in ptep_reset_dat_prot()
292 pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr, in ptep_xchg_lazy() argument
300 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_xchg_lazy()
302 old = ptep_flush_lazy(mm, addr, ptep, nodat); in ptep_xchg_lazy()
303 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new); in ptep_xchg_lazy()
315 struct mm_struct *mm = vma->vm_mm; in ptep_modify_prot_start() local
317 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_modify_prot_start()
319 old = ptep_flush_lazy(mm, addr, ptep, nodat); in ptep_modify_prot_start()
320 if (mm_has_pgste(mm)) { in ptep_modify_prot_start()
321 pgste = pgste_update_all(old, pgste, mm); in ptep_modify_prot_start()
331 struct mm_struct *mm = vma->vm_mm; in ptep_modify_prot_commit() local
333 if (mm_has_pgste(mm)) { in ptep_modify_prot_commit()
335 pgste_set_key(ptep, pgste, pte, mm); in ptep_modify_prot_commit()
343 static inline void pmdp_idte_local(struct mm_struct *mm, in pmdp_idte_local() argument
348 mm->context.asce, IDTE_LOCAL); in pmdp_idte_local()
350 __pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL); in pmdp_idte_local()
351 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_local()
352 gmap_pmdp_idte_local(mm, addr); in pmdp_idte_local()
355 static inline void pmdp_idte_global(struct mm_struct *mm, in pmdp_idte_global() argument
360 mm->context.asce, IDTE_GLOBAL); in pmdp_idte_global()
361 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
362 gmap_pmdp_idte_global(mm, addr); in pmdp_idte_global()
364 __pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL); in pmdp_idte_global()
365 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
366 gmap_pmdp_idte_global(mm, addr); in pmdp_idte_global()
369 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
370 gmap_pmdp_csp(mm, addr); in pmdp_idte_global()
374 static inline pmd_t pmdp_flush_direct(struct mm_struct *mm, in pmdp_flush_direct() argument
382 atomic_inc(&mm->context.flush_count); in pmdp_flush_direct()
384 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in pmdp_flush_direct()
385 pmdp_idte_local(mm, addr, pmdp); in pmdp_flush_direct()
387 pmdp_idte_global(mm, addr, pmdp); in pmdp_flush_direct()
388 atomic_dec(&mm->context.flush_count); in pmdp_flush_direct()
392 static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm, in pmdp_flush_lazy() argument
400 atomic_inc(&mm->context.flush_count); in pmdp_flush_lazy()
401 if (cpumask_equal(&mm->context.cpu_attach_mask, in pmdp_flush_lazy()
404 mm->context.flush_mm = 1; in pmdp_flush_lazy()
405 if (mm_has_pgste(mm)) in pmdp_flush_lazy()
406 gmap_pmdp_invalidate(mm, addr); in pmdp_flush_lazy()
408 pmdp_idte_global(mm, addr, pmdp); in pmdp_flush_lazy()
410 atomic_dec(&mm->context.flush_count); in pmdp_flush_lazy()
415 static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp) in pmd_lookup() argument
423 vma = vma_lookup(mm, addr); in pmd_lookup()
425 return -EFAULT; in pmd_lookup()
427 pgd = pgd_offset(mm, addr); in pmd_lookup()
429 return -ENOENT; in pmd_lookup()
433 return -ENOENT; in pmd_lookup()
437 return -ENOENT; in pmd_lookup()
441 return -EFAULT; in pmd_lookup()
444 return 0; in pmd_lookup()
448 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr, in pmdp_xchg_direct() argument
454 old = pmdp_flush_direct(mm, addr, pmdp); in pmdp_xchg_direct()
461 pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr, in pmdp_xchg_lazy() argument
467 old = pmdp_flush_lazy(mm, addr, pmdp); in pmdp_xchg_lazy()
474 static inline void pudp_idte_local(struct mm_struct *mm, in pudp_idte_local() argument
479 mm->context.asce, IDTE_LOCAL); in pudp_idte_local()
481 __pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL); in pudp_idte_local()
484 static inline void pudp_idte_global(struct mm_struct *mm, in pudp_idte_global() argument
489 mm->context.asce, IDTE_GLOBAL); in pudp_idte_global()
491 __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL); in pudp_idte_global()
500 static inline pud_t pudp_flush_direct(struct mm_struct *mm, in pudp_flush_direct() argument
508 atomic_inc(&mm->context.flush_count); in pudp_flush_direct()
510 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in pudp_flush_direct()
511 pudp_idte_local(mm, addr, pudp); in pudp_flush_direct()
513 pudp_idte_global(mm, addr, pudp); in pudp_flush_direct()
514 atomic_dec(&mm->context.flush_count); in pudp_flush_direct()
518 pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr, in pudp_xchg_direct() argument
524 old = pudp_flush_direct(mm, addr, pudp); in pudp_xchg_direct()
532 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, in pgtable_trans_huge_deposit() argument
537 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_deposit()
540 if (!pmd_huge_pte(mm, pmdp)) in pgtable_trans_huge_deposit()
543 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); in pgtable_trans_huge_deposit()
544 pmd_huge_pte(mm, pmdp) = pgtable; in pgtable_trans_huge_deposit()
547 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) in pgtable_trans_huge_withdraw() argument
553 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_withdraw()
556 pgtable = pmd_huge_pte(mm, pmdp); in pgtable_trans_huge_withdraw()
559 pmd_huge_pte(mm, pmdp) = NULL; in pgtable_trans_huge_withdraw()
561 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; in pgtable_trans_huge_withdraw()
573 void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr, in ptep_set_pte_at() argument
582 pgste_set_key(ptep, pgste, entry, mm); in ptep_set_pte_at()
588 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_set_notify() argument
600 * ptep_force_prot - change access rights of a locked pte
601 * @mm: pointer to the process mm_struct
607 * Returns 0 if the access rights were changed and -EAGAIN if the current and requested access rights are already the same.
610 int ptep_force_prot(struct mm_struct *mm, unsigned long addr, in ptep_force_prot() argument
625 return -EAGAIN; in ptep_force_prot()
630 ptep_flush_direct(mm, addr, ptep, nodat); in ptep_force_prot()
631 pgste = pgste_update_all(entry, pgste, mm); in ptep_force_prot()
635 ptep_flush_direct(mm, addr, ptep, nodat); in ptep_force_prot()
642 return 0; in ptep_force_prot()
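/*
 * Editor's sketch, not part of the matched file: a minimal caller of
 * ptep_force_prot() based on the comment above.  The pte is obtained and
 * locked with get_locked_pte() (which this file uses elsewhere), and the
 * mm is assumed to have PGSTEs.  The helper name and the
 * PROT_READ/PGSTE_IN_BIT arguments are assumptions for illustration only.
 */
static int example_mprotect_guest_pte(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (!ptep)
		return -ENOMEM;
	/* request read-only access and set a notification bit */
	rc = ptep_force_prot(mm, addr, ptep, PROT_READ, PGSTE_IN_BIT);
	pte_unmap_unlock(ptep, ptl);
	return rc;	/* -EAGAIN: rights were already as requested */
}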
645 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, in ptep_shadow_pte() argument
650 int rc = -EAGAIN; in ptep_shadow_pte()
653 return 0; /* already shadowed */ in ptep_shadow_pte()
663 /* don't touch the storage key - it belongs to parent pgste */ in ptep_shadow_pte()
672 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep) in ptep_unshadow_pte() argument
680 ptep_flush_direct(mm, saddr, ptep, nodat); in ptep_unshadow_pte()
681 /* don't touch the storage key - it belongs to parent pgste */ in ptep_unshadow_pte()
686 static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry) in ptep_zap_swap_entry() argument
689 dec_mm_counter(mm, MM_SWAPENTS); in ptep_zap_swap_entry()
693 dec_mm_counter(mm, mm_counter(folio)); in ptep_zap_swap_entry()
698 void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, in ptep_zap_unused() argument
705 /* Zap unused and logically-zero pages */ in ptep_zap_unused()
713 ptep_zap_swap_entry(mm, pte_to_swp_entry(pte)); in ptep_zap_unused()
714 pte_clear(mm, addr, ptep); in ptep_zap_unused()
722 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_zap_key() argument
734 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0); in ptep_zap_key()
742 bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr, in ptep_test_and_clear_uc() argument
755 pgste = pgste_pte_notify(mm, addr, ptep, pgste); in ptep_test_and_clear_uc()
757 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_test_and_clear_uc()
769 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, in set_guest_storage_key() argument
780 * we can ignore attempts to set the key to 0, because it already is 0. in set_guest_storage_key()
782 switch (pmd_lookup(mm, addr, &pmdp)) { in set_guest_storage_key()
783 case -ENOENT: in set_guest_storage_key()
784 return key ? -EFAULT : 0; in set_guest_storage_key()
785 case 0: in set_guest_storage_key()
788 return -EFAULT; in set_guest_storage_key()
791 ptl = pmd_lock(mm, pmdp); in set_guest_storage_key()
794 return key ? -EFAULT : 0; in set_guest_storage_key()
806 return 0; in set_guest_storage_key()
810 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); in set_guest_storage_key()
838 return 0; in set_guest_storage_key()
846 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
847 * storage key was updated and -EFAULT on access errors.
849 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr, in cond_set_guest_storage_key() argument
858 rc = get_guest_storage_key(current->mm, addr, &tmp); in cond_set_guest_storage_key()
868 return 0; in cond_set_guest_storage_key()
870 rc = set_guest_storage_key(current->mm, addr, key, nq); in cond_set_guest_storage_key()
871 return rc < 0 ? rc : 1; in cond_set_guest_storage_key()
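/*
 * Editor's sketch, not part of the matched file: interpreting the return
 * value of cond_set_guest_storage_key() as documented above (0 = no update
 * was necessary, 1 = key updated, -EFAULT = access error).  The full
 * parameter list (old key, nq/mr/mc flags) and the wrapper name are
 * assumptions, since only the first line of the prototype is shown.
 */
static int example_conditional_sske(unsigned long vmaddr, unsigned char key,
				    unsigned char *oldkey, bool *updated)
{
	int rc;

	rc = cond_set_guest_storage_key(current->mm, vmaddr, key, oldkey,
					false, false, false);
	if (rc < 0)
		return rc;	/* -EFAULT on access errors */
	*updated = (rc == 1);	/* 0 means no update was necessary */
	return 0;
}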
878 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
880 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr) in reset_guest_reference_bit() argument
887 int cc = 0; in reset_guest_reference_bit()
891 * the storage key is 0 and there is nothing for us to do. in reset_guest_reference_bit()
893 switch (pmd_lookup(mm, addr, &pmdp)) { in reset_guest_reference_bit()
894 case -ENOENT: in reset_guest_reference_bit()
895 return 0; in reset_guest_reference_bit()
896 case 0: in reset_guest_reference_bit()
899 return -EFAULT; in reset_guest_reference_bit()
902 ptl = pmd_lock(mm, pmdp); in reset_guest_reference_bit()
905 return 0; in reset_guest_reference_bit()
917 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); in reset_guest_reference_bit()
927 /* Merge real referenced bit into host-set */ in reset_guest_reference_bit()
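/*
 * Editor's sketch, not part of the matched file: reset_guest_reference_bit()
 * returns either a negative error or the condition code to report to the
 * guest, as the comment above says.  Splitting that into an error return and
 * a cc output parameter is just one way a caller (e.g. an RRBE intercept
 * handler) might consume the convention; the wrapper name is hypothetical.
 */
static int example_rrbe(struct mm_struct *mm, unsigned long vmaddr,
			unsigned char *cc)
{
	int rc = reset_guest_reference_bit(mm, vmaddr);

	if (rc < 0)
		return rc;	/* e.g. -EFAULT */
	*cc = rc;		/* condition code to be reported to the guest */
	return 0;
}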
942 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, in get_guest_storage_key() argument
953 * the storage key is 0. in get_guest_storage_key()
955 *key = 0; in get_guest_storage_key()
957 switch (pmd_lookup(mm, addr, &pmdp)) { in get_guest_storage_key()
958 case -ENOENT: in get_guest_storage_key()
959 return 0; in get_guest_storage_key()
960 case 0: in get_guest_storage_key()
963 return -EFAULT; in get_guest_storage_key()
966 ptl = pmd_lock(mm, pmdp); in get_guest_storage_key()
969 return 0; in get_guest_storage_key()
977 return 0; in get_guest_storage_key()
981 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); in get_guest_storage_key()
993 return 0; in get_guest_storage_key()
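/*
 * Editor's sketch, not part of the matched file: setting and reading back a
 * guest storage key with the two helpers above.  As their comments note, an
 * unmapped address is not an error in the key-0 case: setting key 0 succeeds
 * because the key "already is 0", and get_guest_storage_key() reports 0.
 * The nq=false argument and the wrapper name are assumptions; the trailing
 * parameters of set_guest_storage_key() are not shown in the listing.
 */
static int example_storage_key_roundtrip(struct mm_struct *mm,
					 unsigned long vmaddr,
					 unsigned char key,
					 unsigned char *readback)
{
	int rc;

	rc = set_guest_storage_key(mm, vmaddr, key, false);
	if (rc)
		return rc;	/* -EFAULT, unless key was 0 for an unmapped page */
	return get_guest_storage_key(mm, vmaddr, readback);
}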
998 * pgste_perform_essa - perform ESSA actions on the PGSTE.
999 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1005 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
1006 * or < 0 in case of error. -EINVAL is returned for invalid values
1007 * of orc, -EFAULT for invalid addresses.
1009 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, in pgste_perform_essa() argument
1017 int res = 0; in pgste_perform_essa()
1021 return -EINVAL; in pgste_perform_essa()
1023 vma = vma_lookup(mm, hva); in pgste_perform_essa()
1025 return -EFAULT; in pgste_perform_essa()
1026 ptep = get_locked_pte(mm, hva, &ptl); in pgste_perform_essa()
1028 return -EFAULT; in pgste_perform_essa()
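/*
 * Editor's sketch, not part of the matched file: driving pgste_perform_essa()
 * over a batch of host virtual addresses and collecting those whose return
 * value is 1 into a caller-provided CBRL-style buffer, per the return
 * convention documented above.  The oldpte/oldpgste output parameters, the
 * mmap_read_lock(mm) requirement (implied by the vma_lookup() call above)
 * and the loop shape are assumptions made for illustration.
 */
static int example_essa_batch(struct mm_struct *mm, const unsigned long *hvas,
			      int nr, int orc, unsigned long *cbrl, int *filled)
{
	unsigned long oldpte, oldpgste;
	int i, rc;

	*filled = 0;
	mmap_read_lock(mm);
	for (i = 0; i < nr; i++) {
		rc = pgste_perform_essa(mm, hvas[i], orc, &oldpte, &oldpgste);
		if (rc < 0) {
			mmap_read_unlock(mm);
			return rc;	/* -EINVAL for a bad orc, -EFAULT otherwise */
		}
		if (rc == 1)
			cbrl[(*filled)++] = hvas[i];
	}
	mmap_read_unlock(mm);
	return 0;
}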
1106 * set_pgste_bits - set specific PGSTE bits.
1107 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1113 * Return: 0 on success, < 0 in case of error.
1115 int set_pgste_bits(struct mm_struct *mm, unsigned long hva, in set_pgste_bits() argument
1123 vma = vma_lookup(mm, hva); in set_pgste_bits()
1125 return -EFAULT; in set_pgste_bits()
1126 ptep = get_locked_pte(mm, hva, &ptl); in set_pgste_bits()
1128 return -EFAULT; in set_pgste_bits()
1136 return 0; in set_pgste_bits()
1141 * get_pgste - get the current PGSTE for the given address.
1142 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1146 * Return: 0 on success, < 0 in case of error.
1148 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep) in get_pgste() argument
1154 vma = vma_lookup(mm, hva); in get_pgste()
1156 return -EFAULT; in get_pgste()
1157 ptep = get_locked_pte(mm, hva, &ptl); in get_pgste()
1159 return -EFAULT; in get_pgste()
1162 return 0; in get_pgste()
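/*
 * Editor's sketch, not part of the matched file: a read-modify-write of PGSTE
 * bits built from get_pgste() and set_pgste_bits(), following the 0 / -EFAULT
 * return convention documented above.  Both helpers require an mm with PGSTEs
 * and, given the vma_lookup()/get_locked_pte() calls shown, a held
 * mmap_read_lock(mm); the mask/value semantics of set_pgste_bits() and the
 * wrapper name are assumptions.
 */
static int example_update_pgste(struct mm_struct *mm, unsigned long hva,
				unsigned long mask, unsigned long value)
{
	unsigned long old;
	int rc;

	rc = get_pgste(mm, hva, &old);
	if (rc < 0)
		return rc;			/* -EFAULT: hva not mapped */
	if ((old & mask) == (value & mask))
		return 0;			/* nothing to change */
	return set_pgste_bits(mm, hva, mask, value);
}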