Lines matching refs:mm — each entry gives the line number in the source file, the matching source line, the enclosing function, and whether mm is an argument or a local variable there.

47 static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,  in ptep_ipte_local()  argument
54 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_local()
58 asce = asce ? : mm->context.asce; in ptep_ipte_local()
67 static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr, in ptep_ipte_global() argument
74 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_global()
78 asce = asce ? : mm->context.asce; in ptep_ipte_global()
87 static inline pte_t ptep_flush_direct(struct mm_struct *mm, in ptep_flush_direct() argument
96 atomic_inc(&mm->context.flush_count); in ptep_flush_direct()
98 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in ptep_flush_direct()
99 ptep_ipte_local(mm, addr, ptep, nodat); in ptep_flush_direct()
101 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_flush_direct()
102 atomic_dec(&mm->context.flush_count); in ptep_flush_direct()
106 static inline pte_t ptep_flush_lazy(struct mm_struct *mm, in ptep_flush_lazy() argument
115 atomic_inc(&mm->context.flush_count); in ptep_flush_lazy()
116 if (cpumask_equal(&mm->context.cpu_attach_mask, in ptep_flush_lazy()
119 mm->context.flush_mm = 1; in ptep_flush_lazy()
121 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_flush_lazy()
122 atomic_dec(&mm->context.flush_count); in ptep_flush_lazy()
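
The two flush helpers above share one decision: bracket the operation with mm->context.flush_count, flush locally (or, for the lazy variant, merely record a pending flush in mm->context.flush_mm) when this mm is attached only to the current CPU, and fall back to a global IPTE otherwise. The following user-space sketch models that decision only; struct mm_sketch, ipte_local() and ipte_global() are invented stand-ins, not the kernel's own types or helpers.

/*
 * Minimal user-space sketch of the flush decision above.  All names
 * here (struct mm_sketch, ipte_local(), ipte_global()) are invented
 * stand-ins for the kernel's mm_struct, cpumask and IPTE machinery.
 */
#include <stdbool.h>
#include <stdio.h>

struct mm_sketch {
        int  flush_count;   /* models mm->context.flush_count                */
        bool flush_mm;      /* models mm->context.flush_mm (deferred flush)  */
        int  attached_cpus; /* models how many CPUs the mm is attached to    */
        int  only_cpu;      /* the sole attached CPU when attached_cpus == 1 */
};

static void ipte_local(void)  { puts("IPTE, local CPU only"); }
static void ipte_global(void) { puts("IPTE, broadcast to all CPUs"); }

/* Like ptep_flush_direct(): invalidate now, locally if we are the only user. */
static void flush_direct(struct mm_sketch *mm, int this_cpu)
{
        mm->flush_count++;
        if (mm->attached_cpus == 1 && mm->only_cpu == this_cpu)
                ipte_local();
        else
                ipte_global();
        mm->flush_count--;
}

/*
 * Like ptep_flush_lazy(): if we are the only user, just record that a
 * flush is still owed (flush_mm) instead of issuing one; otherwise the
 * invalidation must be broadcast immediately.
 */
static void flush_lazy(struct mm_sketch *mm, int this_cpu)
{
        mm->flush_count++;
        if (mm->attached_cpus == 1 && mm->only_cpu == this_cpu) {
                mm->flush_mm = true;
                puts("flush deferred: flush_mm set");
        } else {
                ipte_global();
        }
        mm->flush_count--;
}

int main(void)
{
        struct mm_sketch mm = { .attached_cpus = 1, .only_cpu = 0 };

        flush_direct(&mm, 0);  /* sole user -> local invalidate  */
        flush_lazy(&mm, 0);    /* sole user -> deferred flush    */
        mm.attached_cpus = 2;
        flush_lazy(&mm, 0);    /* shared mm -> global invalidate */
        return 0;
}
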
165 struct mm_struct *mm) in pgste_update_all() argument
170 if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID) in pgste_update_all()
186 struct mm_struct *mm) in pgste_set_key() argument
192 if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID) in pgste_set_key()
230 static inline pgste_t pgste_pte_notify(struct mm_struct *mm, in pgste_pte_notify() argument
240 ptep_notify(mm, addr, ptep, bits); in pgste_pte_notify()
246 static inline pgste_t ptep_xchg_start(struct mm_struct *mm, in ptep_xchg_start() argument
251 if (mm_has_pgste(mm)) { in ptep_xchg_start()
253 pgste = pgste_pte_notify(mm, addr, ptep, pgste); in ptep_xchg_start()
258 static inline pte_t ptep_xchg_commit(struct mm_struct *mm, in ptep_xchg_commit() argument
262 if (mm_has_pgste(mm)) { in ptep_xchg_commit()
264 pgste_set_key(ptep, pgste, new, mm); in ptep_xchg_commit()
266 pgste = pgste_update_all(old, pgste, mm); in ptep_xchg_commit()
279 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr, in ptep_xchg_direct() argument
287 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_xchg_direct()
289 old = ptep_flush_direct(mm, addr, ptep, nodat); in ptep_xchg_direct()
290 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new); in ptep_xchg_direct()
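
ptep_xchg_direct() (and ptep_xchg_lazy() further down) follow a three-step pattern: ptep_xchg_start() fetches the PGSTE and raises any pending notifier, the flush helper invalidates the old translation, and ptep_xchg_commit() propagates storage-key state and installs the new PTE. The sketch below only mirrors that shape; pte_sketch, pgste_sketch and the three helpers are invented simplifications, not the kernel's real PTE/PGSTE handling.

#include <stdio.h>

typedef unsigned long pte_sketch;   /* models a page table entry       */
typedef unsigned long pgste_sketch; /* models the PGSTE paired with it */

/* "start": capture the PGSTE and check for a pending notifier. */
static pgste_sketch xchg_start(pgste_sketch *slot)
{
        printf("start: pgste %#lx fetched, notifier checked\n", *slot);
        return *slot;
}

/* "flush": invalidate the old PTE so no CPU keeps a stale TLB copy. */
static pte_sketch flush_old(pte_sketch *ptep)
{
        pte_sketch old = *ptep;

        *ptep = 0;              /* stands in for setting _PAGE_INVALID */
        puts("flush: old translation invalidated");
        return old;
}

/* "commit": push key state back into the PGSTE and install the new PTE. */
static pte_sketch xchg_commit(pte_sketch *ptep, pgste_sketch *slot,
                              pgste_sketch pgste, pte_sketch old,
                              pte_sketch new)
{
        *slot = pgste;          /* stands in for pgste_set_key()/update_all() */
        *ptep = new;
        return old;
}

int main(void)
{
        pte_sketch pte = 0x1000;
        pgste_sketch pgste_slot = 0x2;

        pgste_sketch pgste = xchg_start(&pgste_slot);
        pte_sketch old = flush_old(&pte);

        old = xchg_commit(&pte, &pgste_slot, pgste, old, 0x2000);
        printf("exchanged: old=%#lx new=%#lx\n", old, pte);
        return 0;
}
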
300 void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep, in ptep_reset_dat_prot() argument
304 atomic_inc(&mm->context.flush_count); in ptep_reset_dat_prot()
305 if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in ptep_reset_dat_prot()
316 atomic_dec(&mm->context.flush_count); in ptep_reset_dat_prot()
321 pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr, in ptep_xchg_lazy() argument
329 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_xchg_lazy()
331 old = ptep_flush_lazy(mm, addr, ptep, nodat); in ptep_xchg_lazy()
332 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new); in ptep_xchg_lazy()
344 struct mm_struct *mm = vma->vm_mm; in ptep_modify_prot_start() local
347 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_modify_prot_start()
349 old = ptep_flush_lazy(mm, addr, ptep, nodat); in ptep_modify_prot_start()
350 if (mm_has_pgste(mm)) { in ptep_modify_prot_start()
351 pgste = pgste_update_all(old, pgste, mm); in ptep_modify_prot_start()
361 struct mm_struct *mm = vma->vm_mm; in ptep_modify_prot_commit() local
365 if (mm_has_pgste(mm)) { in ptep_modify_prot_commit()
367 pgste_set_key(ptep, pgste, pte, mm); in ptep_modify_prot_commit()
376 static inline void pmdp_idte_local(struct mm_struct *mm, in pmdp_idte_local() argument
381 mm->context.asce, IDTE_LOCAL); in pmdp_idte_local()
384 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_local()
385 gmap_pmdp_idte_local(mm, addr); in pmdp_idte_local()
388 static inline void pmdp_idte_global(struct mm_struct *mm, in pmdp_idte_global() argument
393 mm->context.asce, IDTE_GLOBAL); in pmdp_idte_global()
394 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
395 gmap_pmdp_idte_global(mm, addr); in pmdp_idte_global()
398 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
399 gmap_pmdp_idte_global(mm, addr); in pmdp_idte_global()
402 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
403 gmap_pmdp_csp(mm, addr); in pmdp_idte_global()
407 static inline pmd_t pmdp_flush_direct(struct mm_struct *mm, in pmdp_flush_direct() argument
415 atomic_inc(&mm->context.flush_count); in pmdp_flush_direct()
417 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in pmdp_flush_direct()
418 pmdp_idte_local(mm, addr, pmdp); in pmdp_flush_direct()
420 pmdp_idte_global(mm, addr, pmdp); in pmdp_flush_direct()
421 atomic_dec(&mm->context.flush_count); in pmdp_flush_direct()
425 static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm, in pmdp_flush_lazy() argument
433 atomic_inc(&mm->context.flush_count); in pmdp_flush_lazy()
434 if (cpumask_equal(&mm->context.cpu_attach_mask, in pmdp_flush_lazy()
437 mm->context.flush_mm = 1; in pmdp_flush_lazy()
438 if (mm_has_pgste(mm)) in pmdp_flush_lazy()
439 gmap_pmdp_invalidate(mm, addr); in pmdp_flush_lazy()
441 pmdp_idte_global(mm, addr, pmdp); in pmdp_flush_lazy()
443 atomic_dec(&mm->context.flush_count); in pmdp_flush_lazy()
448 static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp) in pmd_lookup() argument
456 vma = vma_lookup(mm, addr); in pmd_lookup()
460 pgd = pgd_offset(mm, addr); in pmd_lookup()
481 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr, in pmdp_xchg_direct() argument
487 old = pmdp_flush_direct(mm, addr, pmdp); in pmdp_xchg_direct()
494 pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr, in pmdp_xchg_lazy() argument
500 old = pmdp_flush_lazy(mm, addr, pmdp); in pmdp_xchg_lazy()
507 static inline void pudp_idte_local(struct mm_struct *mm, in pudp_idte_local() argument
512 mm->context.asce, IDTE_LOCAL); in pudp_idte_local()
517 static inline void pudp_idte_global(struct mm_struct *mm, in pudp_idte_global() argument
522 mm->context.asce, IDTE_GLOBAL); in pudp_idte_global()
533 static inline pud_t pudp_flush_direct(struct mm_struct *mm, in pudp_flush_direct() argument
541 atomic_inc(&mm->context.flush_count); in pudp_flush_direct()
543 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in pudp_flush_direct()
544 pudp_idte_local(mm, addr, pudp); in pudp_flush_direct()
546 pudp_idte_global(mm, addr, pudp); in pudp_flush_direct()
547 atomic_dec(&mm->context.flush_count); in pudp_flush_direct()
551 pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr, in pudp_xchg_direct() argument
557 old = pudp_flush_direct(mm, addr, pudp); in pudp_xchg_direct()
565 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, in pgtable_trans_huge_deposit() argument
570 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_deposit()
573 if (!pmd_huge_pte(mm, pmdp)) in pgtable_trans_huge_deposit()
576 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); in pgtable_trans_huge_deposit()
577 pmd_huge_pte(mm, pmdp) = pgtable; in pgtable_trans_huge_deposit()
580 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) in pgtable_trans_huge_withdraw() argument
586 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_withdraw()
589 pgtable = pmd_huge_pte(mm, pmdp); in pgtable_trans_huge_withdraw()
592 pmd_huge_pte(mm, pmdp) = NULL; in pgtable_trans_huge_withdraw()
594 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; in pgtable_trans_huge_withdraw()
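
pgtable_trans_huge_deposit()/pgtable_trans_huge_withdraw() above keep deposited page tables in FIFO order by reusing the deposited table's own memory as list linkage rooted at pmd_huge_pte(mm, pmdp), so queuing needs no extra allocation. Below is a minimal user-space sketch of that idea; it uses an invented singly linked queue (struct pgtable_sketch, struct deposit_slot) in place of the kernel's list_head-based version, which runs under the pmd lock.

#include <assert.h>
#include <stdio.h>

struct pgtable_sketch {
        struct pgtable_sketch *next; /* overlays the page table's first bytes */
        int id;                      /* stands in for the page table contents */
};

struct deposit_slot {                /* models pmd_huge_pte(mm, pmdp) */
        struct pgtable_sketch *head;
        struct pgtable_sketch *tail;
};

/* Deposit a page table: append it, using its own memory as the link. */
static void deposit(struct deposit_slot *slot, struct pgtable_sketch *pg)
{
        pg->next = NULL;
        if (!slot->head)
                slot->head = pg;
        else
                slot->tail->next = pg;
        slot->tail = pg;
}

/* Withdraw the oldest deposited page table (FIFO order). */
static struct pgtable_sketch *withdraw(struct deposit_slot *slot)
{
        struct pgtable_sketch *pg = slot->head;

        assert(pg);                  /* caller must have deposited first */
        slot->head = pg->next;
        if (!slot->head)
                slot->tail = NULL;
        return pg;
}

int main(void)
{
        struct deposit_slot slot = { NULL, NULL };
        struct pgtable_sketch a = { .id = 1 }, b = { .id = 2 };

        deposit(&slot, &a);
        deposit(&slot, &b);
        printf("%d\n", withdraw(&slot)->id);  /* 1: FIFO order */
        printf("%d\n", withdraw(&slot)->id);  /* 2 */
        return 0;
}
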
606 void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr, in ptep_set_pte_at() argument
615 pgste_set_key(ptep, pgste, entry, mm); in ptep_set_pte_at()
621 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_set_notify() argument
643 int ptep_force_prot(struct mm_struct *mm, unsigned long addr, in ptep_force_prot() argument
663 ptep_flush_direct(mm, addr, ptep, nodat); in ptep_force_prot()
664 pgste = pgste_update_all(entry, pgste, mm); in ptep_force_prot()
668 ptep_flush_direct(mm, addr, ptep, nodat); in ptep_force_prot()
678 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, in ptep_shadow_pte() argument
705 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep) in ptep_unshadow_pte() argument
713 ptep_flush_direct(mm, saddr, ptep, nodat); in ptep_unshadow_pte()
719 static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry) in ptep_zap_swap_entry() argument
722 dec_mm_counter(mm, MM_SWAPENTS); in ptep_zap_swap_entry()
726 dec_mm_counter(mm, mm_counter(folio)); in ptep_zap_swap_entry()
731 void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, in ptep_zap_unused() argument
746 ptep_zap_swap_entry(mm, pte_to_swp_entry(pte)); in ptep_zap_unused()
747 pte_clear(mm, addr, ptep); in ptep_zap_unused()
755 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_zap_key() argument
775 bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr, in ptep_test_and_clear_uc() argument
788 pgste = pgste_pte_notify(mm, addr, ptep, pgste); in ptep_test_and_clear_uc()
790 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_test_and_clear_uc()
802 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, in set_guest_storage_key() argument
815 switch (pmd_lookup(mm, addr, &pmdp)) { in set_guest_storage_key()
824 ptl = pmd_lock(mm, pmdp); in set_guest_storage_key()
843 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); in set_guest_storage_key()
882 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr, in cond_set_guest_storage_key() argument
891 rc = get_guest_storage_key(current->mm, addr, &tmp); in cond_set_guest_storage_key()
903 rc = set_guest_storage_key(current->mm, addr, key, nq); in cond_set_guest_storage_key()
913 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr) in reset_guest_reference_bit() argument
926 switch (pmd_lookup(mm, addr, &pmdp)) { in reset_guest_reference_bit()
935 ptl = pmd_lock(mm, pmdp); in reset_guest_reference_bit()
950 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); in reset_guest_reference_bit()
975 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, in get_guest_storage_key() argument
990 switch (pmd_lookup(mm, addr, &pmdp)) { in get_guest_storage_key()
999 ptl = pmd_lock(mm, pmdp); in get_guest_storage_key()
1014 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); in get_guest_storage_key()
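
set_guest_storage_key(), reset_guest_reference_bit() and get_guest_storage_key() above share the same dispatch: pmd_lookup() resolves the pmd (with -ENOENT meaning nothing is mapped there), a huge mapping is then handled under pmd_lock(), and otherwise the operation drops to the single PTE under pte_offset_map_lock(). A rough user-space sketch of that shape follows; the types, helpers and the exact return-code handling are simplified stand-ins, not the kernel's.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct pmd_sketch {
        bool present;
        bool huge;       /* segment-sized mapping vs. a page table below it */
};

/* Stand-in for pmd_lookup(): 0 on success, -ENOENT if nothing is mapped. */
static int lookup(struct pmd_sketch *pmd, struct pmd_sketch **out)
{
        if (!pmd->present)
                return -ENOENT;
        *out = pmd;
        return 0;
}

static int storage_key_op(struct pmd_sketch *pmd)
{
        struct pmd_sketch *pmdp;

        switch (lookup(pmd, &pmdp)) {
        case -ENOENT:
                return 0;          /* nothing mapped: treat the key as absent */
        case 0:
                break;
        default:
                return -EFAULT;    /* anything else is a real fault */
        }

        if (pmdp->huge) {
                puts("pmd_lock(): apply the operation to the whole segment");
                return 0;
        }
        puts("pte_offset_map_lock(): apply the operation to the single page");
        return 0;
}

int main(void)
{
        struct pmd_sketch none   = { .present = false };
        struct pmd_sketch normal = { .present = true, .huge = false };
        struct pmd_sketch huge   = { .present = true, .huge = true };

        storage_key_op(&none);
        storage_key_op(&normal);
        storage_key_op(&huge);
        return 0;
}
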
1042 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, in pgste_perform_essa() argument
1056 vma = vma_lookup(mm, hva); in pgste_perform_essa()
1059 ptep = get_locked_pte(mm, hva, &ptl); in pgste_perform_essa()
1148 int set_pgste_bits(struct mm_struct *mm, unsigned long hva, in set_pgste_bits() argument
1156 vma = vma_lookup(mm, hva); in set_pgste_bits()
1159 ptep = get_locked_pte(mm, hva, &ptl); in set_pgste_bits()
1181 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep) in get_pgste() argument
1187 vma = vma_lookup(mm, hva); in get_pgste()
1190 ptep = get_locked_pte(mm, hva, &ptl); in get_pgste()
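
pgste_perform_essa(), set_pgste_bits() and get_pgste() all follow the same short sequence: vma_lookup() checks that the host virtual address is mapped at all, get_locked_pte() returns the PTE with its lock held, and the PGSTE next to that PTE is then read or updated before unlocking. The sketch below models only that shape; struct pte_slot and pgste_access() are invented stand-ins for illustration.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct pte_slot {
        bool mapped;             /* models vma_lookup() finding a VMA      */
        unsigned long pgste;     /* models the PGSTE paired with the PTE   */
};

/* Read or merge PGSTE bits, under what would be the PTE lock in the kernel. */
static int pgste_access(struct pte_slot *slot, unsigned long set_bits,
                        unsigned long *out)
{
        if (!slot->mapped)
                return -EFAULT;   /* no VMA: the address is simply invalid */

        /* lock ... */
        slot->pgste |= set_bits;  /* set_pgste_bits()-like update */
        *out = slot->pgste;       /* get_pgste()-like read        */
        /* ... unlock */
        return 0;
}

int main(void)
{
        struct pte_slot slot = { .mapped = true, .pgste = 0x10 };
        unsigned long val;

        if (!pgste_access(&slot, 0x2, &val))
                printf("pgste now %#lx\n", val);
        return 0;
}
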