Lines Matching +full:mm +full:- +full:0
1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
8 #include <linux/mm.h>
12 #include <linux/sched/mm.h>
15 #include <asm/ppc-opcode.h>
38 asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1) in tlbiel_radix_set_isa300()
57 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0); in tlbiel_all_isa300()
61 tlbiel_radix_set_isa300(set, is, 0, in tlbiel_all_isa300()
62 RIC_FLUSH_TLB, 0); in tlbiel_all_isa300()
67 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1); in tlbiel_all_isa300()
71 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1); in tlbiel_all_isa300()
95 WARN(1, "%s called on pre-POWER9 CPU\n", __func__); in radix__tlbiel_all()
111 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) in __tlbiel_pid()
113 trace_tlbie(0, 1, rb, rs, ric, prs, r); in __tlbiel_pid()
125 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) in __tlbie_pid()
127 trace_tlbie(0, 0, rb, rs, ric, prs, r); in __tlbie_pid()
136 prs = 0; /* partition scoped */ in __tlbie_lpid()
139 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) in __tlbie_lpid()
141 trace_tlbie(lpid, 0, rb, rs, ric, prs, r); in __tlbie_lpid()
153 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) in __tlbie_lpid_guest()
155 trace_tlbie(lpid, 0, rb, rs, ric, prs, r); in __tlbie_lpid_guest()
169 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) in __tlbiel_va()
171 trace_tlbie(0, 1, rb, rs, ric, prs, r); in __tlbiel_va()
185 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) in __tlbie_va()
187 trace_tlbie(0, 0, rb, rs, ric, prs, r); in __tlbie_va()
198 prs = 0; /* partition scoped */ in __tlbie_lpid_va()
201 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) in __tlbie_lpid_va()
203 trace_tlbie(lpid, 0, rb, rs, ric, prs, r); in __tlbie_lpid_va()
212 __tlbie_va(va, 0, ap, RIC_FLUSH_TLB); in fixup_tlbie_va()
226 __tlbie_pid(0, RIC_FLUSH_TLB); in fixup_tlbie_va_range()
241 unsigned long va = ((1UL << 52) - 1); in fixup_tlbie_pid()
245 __tlbie_pid(0, RIC_FLUSH_TLB); in fixup_tlbie_pid()
259 __tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB); in fixup_tlbie_lpid_va()
274 unsigned long va = ((1UL << 52) - 1); in fixup_tlbie_lpid()
278 __tlbie_lpid(0, RIC_FLUSH_TLB); in fixup_tlbie_lpid()
300 __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); in _tlbiel_pid()
304 __tlbiel_pid(pid, 0, RIC_FLUSH_TLB); in _tlbiel_pid()
313 __tlbiel_pid(pid, 0, RIC_FLUSH_ALL); in _tlbiel_pid()
332 * must be a compile-time constraint to match the "i" constraint in _tlbie_pid()
360 if (t->ric == RIC_FLUSH_TLB) in do_tlbiel_pid()
361 _tlbiel_pid(t->pid, RIC_FLUSH_TLB); in do_tlbiel_pid()
362 else if (t->ric == RIC_FLUSH_PWC) in do_tlbiel_pid()
363 _tlbiel_pid(t->pid, RIC_FLUSH_PWC); in do_tlbiel_pid()
365 _tlbiel_pid(t->pid, RIC_FLUSH_ALL); in do_tlbiel_pid()
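The switch-style dispatch in do_tlbiel_pid() exists because the RIC argument feeds an "i" (immediate) inline-asm constraint, so every call site must pass a literal constant rather than the runtime t->ric. A minimal userspace sketch of the same pattern, assuming GCC or Clang on x86-64 (the names and the lea-based asm are illustrative, not kernel code):

    #include <stdio.h>

    /* The "i" constraint demands a compile-time constant, so OP_IMM is a
     * macro: only literal arguments can reach the asm operand. */
    #define OP_IMM(k) ({                                            \
            long _out;                                              \
            asm("lea %c1(%2), %0" : "=r"(_out) : "i"(k), "r"(10L)); \
            _out;                                                   \
    })

    /* A runtime value must be dispatched so each branch passes a literal,
     * exactly like do_tlbiel_pid() above. */
    static long dispatch(int ric)
    {
            switch (ric) {
            case 0:  return OP_IMM(0);
            case 1:  return OP_IMM(1);
            default: return OP_IMM(2);
            }
    }

    int main(void)
    {
            printf("%ld\n", dispatch(1));   /* prints 11 */
            return 0;
    }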
368 static inline void _tlbiel_pid_multicast(struct mm_struct *mm, in _tlbiel_pid_multicast() argument
371 struct cpumask *cpus = mm_cpumask(mm); in _tlbiel_pid_multicast()
380 if (atomic_read(&mm->context.copros) > 0) in _tlbiel_pid_multicast()
390 * must be a compile-time constraint to match the "i" constraint in _tlbie_lpid()
413 * must be a compile-time constraint to match the "i" constraint in _tlbie_lpid_guest()
458 __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); in _tlbiel_va_range()
473 fixup_tlbie_va_range(addr - page_size, pid, ap); in __tlbie_va_range()
498 if (t->ric == RIC_FLUSH_TLB) in do_tlbiel_va()
499 _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_TLB); in do_tlbiel_va()
500 else if (t->ric == RIC_FLUSH_PWC) in do_tlbiel_va()
501 _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_PWC); in do_tlbiel_va()
503 _tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_ALL); in do_tlbiel_va()
506 static inline void _tlbiel_va_multicast(struct mm_struct *mm, in _tlbiel_va_multicast() argument
510 struct cpumask *cpus = mm_cpumask(mm); in _tlbiel_va_multicast()
513 if (atomic_read(&mm->context.copros) > 0) in _tlbiel_va_multicast()
530 _tlbiel_va_range(t->start, t->end, t->pid, t->page_size, in do_tlbiel_va_range()
531 t->psize, t->also_pwc); in do_tlbiel_va_range()
556 static inline void _tlbiel_va_range_multicast(struct mm_struct *mm, in _tlbiel_va_range_multicast() argument
561 struct cpumask *cpus = mm_cpumask(mm); in _tlbiel_va_range_multicast()
567 if (atomic_read(&mm->context.copros) > 0) in _tlbiel_va_range_multicast()
574 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
575 * - flush_tlb_page(vma, vmaddr) flushes one page
576 * - flush_tlb_range(vma, start, end) flushes a range of pages
577 * - flush_tlb_kernel_range(start, end) flushes kernel pages
579 * - local_* variants of page and mm only apply to the current
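The list above is the whole of the flush API this file implements, from a single page up to the entire kernel address range. A stubbed, userspace-runnable sketch of the call contract (types and bodies are stand-ins, not the kernel implementations):

    #include <stdio.h>

    struct mm_struct      { int id; };
    struct vm_area_struct { struct mm_struct *vm_mm; };

    /* Each stub prints the granularity the real routine covers. */
    static void flush_tlb_mm(struct mm_struct *mm)
    { printf("whole address space of mm %d\n", mm->id); }

    static void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
    { printf("one page at 0x%lx in mm %d\n", va, vma->vm_mm->id); }

    static void flush_tlb_range(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
    { printf("[0x%lx, 0x%lx) in mm %d\n", start, end, vma->vm_mm->id); }

    static void flush_tlb_kernel_range(unsigned long start, unsigned long end)
    { printf("kernel pages [0x%lx, 0x%lx)\n", start, end); }

    int main(void)
    {
            struct mm_struct mm = { .id = 7 };
            struct vm_area_struct vma = { .vm_mm = &mm };
            flush_tlb_page(&vma, 0x10000);
            flush_tlb_mm(&mm);
            return 0;
    }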
582 void radix__local_flush_tlb_mm(struct mm_struct *mm) in radix__local_flush_tlb_mm() argument
584 unsigned long pid = mm->context.id; in radix__local_flush_tlb_mm()
596 void radix__local_flush_all_mm(struct mm_struct *mm) in radix__local_flush_all_mm() argument
598 unsigned long pid = mm->context.id; in radix__local_flush_all_mm()
609 static void __flush_all_mm(struct mm_struct *mm, bool fullmm) in __flush_all_mm() argument
611 radix__local_flush_all_mm(mm); in __flush_all_mm()
615 void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, in radix__local_flush_tlb_page_psize() argument
618 unsigned long pid = mm->context.id; in radix__local_flush_tlb_page_psize()
635 radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize); in radix__local_flush_tlb_page()
639 static bool mm_needs_flush_escalation(struct mm_struct *mm) in mm_needs_flush_escalation() argument
643 * and not flushing them when RIC = 0 for a PID/LPID invalidate. in mm_needs_flush_escalation()
647 * this workaround is required - escalate all RIC=0 IS=1/2/3 flushes in mm_needs_flush_escalation()
654 if (atomic_read(&mm->context.copros) > 0) in mm_needs_flush_escalation()
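The escalation above guards against the nest-MMU behaviour described in the comment: coprocessor translation caches may ignore a TLB-only (RIC=0) PID/LPID invalidate, so broadcast flushes for an mm with coprocessors attached are promoted to a full flush. A condensed sketch of that decision, using the RIC encodings this file's RIC_FLUSH_* names stand for (0 = TLB, 1 = page-walk cache, 2 = all):

    enum ric { RIC_FLUSH_TLB = 0, RIC_FLUSH_PWC = 1, RIC_FLUSH_ALL = 2 };

    /* Hypothetical helper: escalate a TLB-only flush to a full flush when a
     * coprocessor (nest MMU) holds translations for this mm. */
    static enum ric pick_ric(int copros, enum ric wanted)
    {
            if (wanted == RIC_FLUSH_TLB && copros > 0)
                    return RIC_FLUSH_ALL;
            return wanted;
    }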
663 void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush) in exit_lazy_flush_tlb() argument
665 unsigned long pid = mm->context.id; in exit_lazy_flush_tlb()
671 * interrupted here. In that case, current->mm will be set to mm, in exit_lazy_flush_tlb()
672 * because kthread_use_mm() setting ->mm and switching to the mm is in exit_lazy_flush_tlb()
675 if (current->mm == mm) in exit_lazy_flush_tlb()
678 if (current->active_mm == mm) { in exit_lazy_flush_tlb()
681 WARN_ON_ONCE(current->mm != NULL); in exit_lazy_flush_tlb()
683 * It is a kernel thread and is using mm as the lazy tlb, so in exit_lazy_flush_tlb()
689 current->active_mm = &init_mm; in exit_lazy_flush_tlb()
690 switch_mm_irqs_off(mm, &init_mm, current); in exit_lazy_flush_tlb()
691 mmdrop_lazy_tlb(mm); in exit_lazy_flush_tlb()
697 * running the mm, so there may be a racing IPI that comes after in exit_lazy_flush_tlb()
703 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { in exit_lazy_flush_tlb()
704 dec_mm_active_cpus(mm); in exit_lazy_flush_tlb()
705 cpumask_clear_cpu(cpu, mm_cpumask(mm)); in exit_lazy_flush_tlb()
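exit_lazy_flush_tlb() has two jobs in order: evict a kernel thread that is borrowing the mm as its lazy active_mm (switching it to init_mm), and only then drop this CPU from mm_cpumask so later flushes may skip it. A simplified, single-CPU sketch of that ordering with stand-in types (not the kernel's):

    #include <stdbool.h>

    struct mm { int active_cpus; unsigned long cpumask; };

    static struct mm init_mm;

    /* If this CPU is lazily on 'mm', hand it off to init_mm first; only a
     * CPU that is no longer running the mm may leave the cpumask. */
    static void exit_lazy(struct mm *mm, int cpu, struct mm **active_mm)
    {
            if (*active_mm == mm)
                    *active_mm = &init_mm;        /* switch_mm_irqs_off() */
            if (mm->cpumask & (1UL << cpu)) {
                    mm->active_cpus--;            /* dec_mm_active_cpus() */
                    mm->cpumask &= ~(1UL << cpu); /* cpumask_clear_cpu() */
            }
    }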
717 struct mm_struct *mm = arg; in do_exit_flush_lazy_tlb() local
718 exit_lazy_flush_tlb(mm, true); in do_exit_flush_lazy_tlb()
721 static void exit_flush_lazy_tlbs(struct mm_struct *mm) in exit_flush_lazy_tlbs() argument
730 smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb, in exit_flush_lazy_tlbs()
731 (void *)mm, 1); in exit_flush_lazy_tlbs()
735 static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { } in exit_flush_lazy_tlbs() argument
742 * mm_cpumask can be trimmed for the case where it's not a single-threaded
743 * process flushing its own mm. The intent is to reduce the cost of later
753 __this_cpu_write(mm_cpumask_trim_clock, 0); in tick_and_test_trim_clock()
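Trimming is rate-limited: tick_and_test_trim_clock() bumps a per-CPU counter and only lets the expensive cpumask-trim path run when the counter wraps. A sketch of that shape, with a hypothetical period (the real period and counter are per-CPU kernel state):

    #include <stdbool.h>

    #define TRIM_PERIOD 1024        /* hypothetical value */

    static unsigned int trim_clock; /* per-CPU in the real code */

    static bool tick_and_test_trim(void)
    {
            if (++trim_clock < TRIM_PERIOD)
                    return false;
            trim_clock = 0;         /* mirrors the __this_cpu_write(..., 0) */
            return true;
    }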
765 static enum tlb_flush_type flush_type_needed(struct mm_struct *mm, bool fullmm) in flush_type_needed() argument
767 int active_cpus = atomic_read(&mm->context.active_cpus); in flush_type_needed()
770 if (active_cpus == 0) in flush_type_needed()
772 if (active_cpus == 1 && cpumask_test_cpu(cpu, mm_cpumask(mm))) { in flush_type_needed()
773 if (current->mm != mm) { in flush_type_needed()
780 exit_lazy_flush_tlb(mm, true); in flush_type_needed()
788 if (atomic_read(&mm->context.copros) > 0) in flush_type_needed()
793 * because the mm is being taken down anyway, and a TLBIE tends to in flush_type_needed()
800 * If we are running the only thread of a single-threaded process, in flush_type_needed()
805 if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) { in flush_type_needed()
806 exit_flush_lazy_tlbs(mm); in flush_type_needed()
822 exit_flush_lazy_tlbs(mm); in flush_type_needed()
823 if (current->mm == mm) in flush_type_needed()
825 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) in flush_type_needed()
826 exit_lazy_flush_tlb(mm, true); in flush_type_needed()
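Stripped of the lazy-TLB eviction and coprocessor handling, flush_type_needed() reduces to a three-way decision. A condensed sketch (enum names follow this file's tlb_flush_type; the full function also handles coprocessors and fullmm teardown):

    #include <stdbool.h>

    enum tlb_flush_type { FLUSH_TYPE_NONE, FLUSH_TYPE_LOCAL, FLUSH_TYPE_GLOBAL };

    static enum tlb_flush_type flush_type(int active_cpus, bool this_cpu_active)
    {
            if (active_cpus == 0)
                    return FLUSH_TYPE_NONE;   /* nothing has the mm loaded */
            if (active_cpus == 1 && this_cpu_active)
                    return FLUSH_TYPE_LOCAL;  /* tlbiel on this CPU suffices */
            return FLUSH_TYPE_GLOBAL;         /* broadcast tlbie or IPIs */
    }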
834 void radix__flush_tlb_mm(struct mm_struct *mm) in radix__flush_tlb_mm() argument
839 pid = mm->context.id; in radix__flush_tlb_mm()
850 type = flush_type_needed(mm, false); in radix__flush_tlb_mm()
857 if (atomic_read(&mm->context.copros) > 0) in radix__flush_tlb_mm()
860 H_RPTI_PAGE_ALL, 0, -1UL); in radix__flush_tlb_mm()
862 if (mm_needs_flush_escalation(mm)) in radix__flush_tlb_mm()
867 _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB); in radix__flush_tlb_mm()
871 mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); in radix__flush_tlb_mm()
875 static void __flush_all_mm(struct mm_struct *mm, bool fullmm) in __flush_all_mm() argument
880 pid = mm->context.id; in __flush_all_mm()
886 type = flush_type_needed(mm, fullmm); in __flush_all_mm()
895 if (atomic_read(&mm->context.copros) > 0) in __flush_all_mm()
898 H_RPTI_PAGE_ALL, 0, -1UL); in __flush_all_mm()
902 _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL); in __flush_all_mm()
905 mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); in __flush_all_mm()
908 void radix__flush_all_mm(struct mm_struct *mm) in radix__flush_all_mm() argument
910 __flush_all_mm(mm, false); in radix__flush_all_mm()
914 void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, in radix__flush_tlb_page_psize() argument
920 pid = mm->context.id; in radix__flush_tlb_page_psize()
926 type = flush_type_needed(mm, false); in radix__flush_tlb_page_psize()
937 if (atomic_read(&mm->context.copros) > 0) in radix__flush_tlb_page_psize()
945 _tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB); in radix__flush_tlb_page_psize()
956 radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize); in radix__flush_tlb_page()
964 _tlbiel_pid(0, RIC_FLUSH_ALL); in do_tlbiel_kernel()
976 _tlbie_pid(0, RIC_FLUSH_ALL); in _tlbiel_kernel_broadcast()
992 pseries_rpt_invalidate(0, tgt, type, H_RPTI_PAGE_ALL, in radix__flush_tlb_kernel_range()
995 _tlbie_pid(0, RIC_FLUSH_ALL); in radix__flush_tlb_kernel_range()
1004 #define TLB_FLUSH_ALL -1UL
1013 * individual page flushes to full-pid flushes.
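The conversion point is a page-count ceiling: past some number of pages, one full-PID flush beats a train of per-page invalidations. A sketch of the heuristic (the ceiling is tunable via the debugfs file set up by create_tlb_single_page_flush_ceiling() near the end of this file; 33 is assumed here for illustration):

    #include <stdbool.h>

    static unsigned long single_page_flush_ceiling = 33;  /* assumed default */

    static bool flush_whole_pid(unsigned long start, unsigned long end,
                                unsigned int page_shift)
    {
            unsigned long nr_pages = (end - start) >> page_shift;
            return nr_pages > single_page_flush_ceiling;
    }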
1018 static inline void __radix__flush_tlb_range(struct mm_struct *mm, in __radix__flush_tlb_range() argument
1024 unsigned long nr_pages = (end - start) >> page_shift; in __radix__flush_tlb_range()
1028 pid = mm->context.id; in __radix__flush_tlb_range()
1036 type = flush_type_needed(mm, false); in __radix__flush_tlb_range()
1049 if (!flush_pid && (end - start) >= PMD_SIZE) in __radix__flush_tlb_range()
1059 if (atomic_read(&mm->context.copros) > 0) in __radix__flush_tlb_range()
1074 _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL); in __radix__flush_tlb_range()
1081 hstart = (start + PMD_SIZE - 1) & PMD_MASK; in __radix__flush_tlb_range()
1089 __tlbiel_pid(pid, 0, RIC_FLUSH_PWC); in __radix__flush_tlb_range()
1105 _tlbiel_va_range_multicast(mm, in __radix__flush_tlb_range()
1108 _tlbiel_va_range_multicast(mm, in __radix__flush_tlb_range()
1114 mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end); in __radix__flush_tlb_range()
1126 __radix__flush_tlb_range(vma->vm_mm, start, end); in radix__flush_tlb_range()
1141 return -1; in radix_get_mmu_psize()
1186 int psize = 0; in radix__tlb_flush()
1187 struct mm_struct *mm = tlb->mm; in radix__tlb_flush() local
1188 int page_size = tlb->page_size; in radix__tlb_flush()
1189 unsigned long start = tlb->start; in radix__tlb_flush()
1190 unsigned long end = tlb->end; in radix__tlb_flush()
1193 * if page size is not something we understand, do a full mm flush in radix__tlb_flush()
1199 if (tlb->fullmm) { in radix__tlb_flush()
1202 * Shootdown based lazy tlb mm refcounting means we in radix__tlb_flush()
1204 * when the mm goes away, so might as well do it as in radix__tlb_flush()
1214 exit_flush_lazy_tlbs(mm); in radix__tlb_flush()
1215 __flush_all_mm(mm, true); in radix__tlb_flush()
1219 __flush_all_mm(mm, true); in radix__tlb_flush()
1222 } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) { in radix__tlb_flush()
1223 if (!tlb->freed_tables) in radix__tlb_flush()
1224 radix__flush_tlb_mm(mm); in radix__tlb_flush()
1226 radix__flush_all_mm(mm); in radix__tlb_flush()
1228 if (!tlb->freed_tables) in radix__tlb_flush()
1229 radix__flush_tlb_range_psize(mm, start, end, psize); in radix__tlb_flush()
1231 radix__flush_tlb_pwc_range_psize(mm, start, end, psize); in radix__tlb_flush()
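radix__tlb_flush() therefore picks among four outcomes based on the mmu_gather state. A hypothetical condensation of those branches:

    #include <stdbool.h>

    enum flush_action { FLUSH_ALL_MM, FLUSH_MM, FLUSH_RANGE, FLUSH_RANGE_PWC };

    /* fullmm tears down everything; an unrecognised page size forces a
     * whole-PID flush; otherwise flush the range, adding the page-walk
     * cache when page tables were freed. */
    static enum flush_action pick_flush(bool fullmm, int psize, bool freed_tables)
    {
            if (fullmm)
                    return FLUSH_ALL_MM;
            if (psize == -1)
                    return freed_tables ? FLUSH_ALL_MM : FLUSH_MM;
            return freed_tables ? FLUSH_RANGE_PWC : FLUSH_RANGE;
    }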
1235 static void __radix__flush_tlb_range_psize(struct mm_struct *mm, in __radix__flush_tlb_range_psize() argument
1242 unsigned long nr_pages = (end - start) >> page_shift; in __radix__flush_tlb_range_psize()
1246 pid = mm->context.id; in __radix__flush_tlb_range_psize()
1254 type = flush_type_needed(mm, false); in __radix__flush_tlb_range_psize()
1270 if (atomic_read(&mm->context.copros) > 0) in __radix__flush_tlb_range_psize()
1278 if (mm_needs_flush_escalation(mm)) in __radix__flush_tlb_range_psize()
1284 _tlbiel_pid_multicast(mm, pid, in __radix__flush_tlb_range_psize()
1295 _tlbiel_va_range_multicast(mm, in __radix__flush_tlb_range_psize()
1300 mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end); in __radix__flush_tlb_range_psize()
1303 void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, in radix__flush_tlb_range_psize() argument
1306 return __radix__flush_tlb_range_psize(mm, start, end, psize, false); in radix__flush_tlb_range_psize()
1309 void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start, in radix__flush_tlb_pwc_range_psize() argument
1312 __radix__flush_tlb_range_psize(mm, start, end, psize, true); in radix__flush_tlb_pwc_range_psize()
1316 void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) in radix__flush_tlb_collapsed_pmd() argument
1321 pid = mm->context.id; in radix__flush_tlb_collapsed_pmd()
1326 if (PAGE_SIZE == 0x1000) { in radix__flush_tlb_collapsed_pmd()
1327 radix__flush_all_mm(mm); in radix__flush_tlb_collapsed_pmd()
1336 type = flush_type_needed(mm, false); in radix__flush_tlb_collapsed_pmd()
1348 if (atomic_read(&mm->context.copros) > 0) in radix__flush_tlb_collapsed_pmd()
1355 _tlbiel_va_range_multicast(mm, in radix__flush_tlb_collapsed_pmd()
1366 radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M); in radix__flush_pmd_tlb_range()
1373 radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_1G); in radix__flush_pud_tlb_range()
1382 rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */ in radix__flush_tlb_all()
1383 prs = 0; /* partition scoped */ in radix__flush_tlb_all()
1385 rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */ in radix__flush_tlb_all()
1389 * now flush guest entries by passing PRS = 1 and LPID != 0 in radix__flush_tlb_all()
1391 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) in radix__flush_tlb_all()
1394 * now flush host entries by passing PRS = 0 and LPID == 0 in radix__flush_tlb_all()
1396 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) in radix__flush_tlb_all()
1397 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory"); in radix__flush_tlb_all()
1409 rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31))); in __tlbie_pid_lpid()
1413 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) in __tlbie_pid_lpid()
1415 trace_tlbie(0, 0, rb, rs, ric, prs, r); in __tlbie_pid_lpid()
1426 rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31))); in __tlbie_va_lpid()
1430 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) in __tlbie_va_lpid()
1432 trace_tlbie(0, 0, rb, rs, ric, prs, r); in __tlbie_va_lpid()
1441 unsigned long va = ((1UL << 52) - 1); in fixup_tlbie_pid_lpid()
1445 __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB); in fixup_tlbie_pid_lpid()
1462 * must be a compile-time constraint to match the "i" constraint in _tlbie_pid_lpid()
1488 __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB); in fixup_tlbie_va_range_lpid()
1508 fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap); in __tlbie_va_range_lpid()
1524 * Performs process-scoped invalidations for a given LPID
1548 if (start == 0 && end == -1) in do_h_rpt_invalidate_prt()
1552 for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { in do_h_rpt_invalidate_prt()
1554 if (!(pg_sizes & def->h_rpt_pgsize)) in do_h_rpt_invalidate_prt()
1557 nr_pages = (end - start) >> def->shift; in do_h_rpt_invalidate_prt()
1571 (1UL << def->shift), psize, false); in do_h_rpt_invalidate_prt()
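do_h_rpt_invalidate_prt() walks every MMU page size, skips sizes the caller did not request in pg_sizes, and issues a ranged flush at each matching granularity. A simplified sketch of that loop with stand-in definitions (mmu_psize_defs reduced to the two fields used here):

    struct psize_def { unsigned int shift; unsigned long h_rpt_pgsize; };

    static void invalidate_requested(const struct psize_def *defs, int count,
                                     unsigned long pg_sizes,
                                     unsigned long start, unsigned long end)
    {
            for (int i = 0; i < count; i++) {
                    if (!(pg_sizes & defs[i].h_rpt_pgsize))
                            continue;
                    /* a ranged flush at (1UL << defs[i].shift) granularity
                     * would be issued here */
            }
    }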
1584 return 0; in create_tlb_single_page_flush_ceiling()