/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cachetype.h>

/*
 * ID_AA64MMFR0_EL1[7:4] is the ASIDBits field (0: 8-bit ASIDs,
 * 2: 16-bit ASIDs). Masking and shifting right by 2 leaves the field
 * value multiplied by 4, so adding 8 yields 8 or 16 respectively.
 */
#define asid_bits() \
	(((read_cpuid(ID_AA64MMFR0_EL1) & 0xf0) >> 2) + 8)

#define ASID_FIRST_VERSION	(1 << MAX_ASID_BITS)

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;

/*
 * We fork()ed a process, and we need a new context for the child to run in.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	raw_spin_lock_init(&mm->context.id_lock);
}

static void flush_context(void)
{
	/* set the reserved TTBR0 before flushing the TLB */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	if (icache_is_aivivt())
		__flush_icache_all();
}

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the same
	 * mm->context.id could be set from different CPUs during the
	 * broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and reset
		 * mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast from the
 * CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = current->active_mm;

	/*
	 * current->active_mm could be init_mm for the idle thread immediately
	 * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
	 * the reserved value, so no need to reset any context.
	 */
	if (mm == &init_mm)
		return;

	/* pairs with the smp_wmb() in __new_context() */
	smp_rmb();
	asid = cpu_last_asid + cpu;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_switch_mm(mm->pgd, mm);
}
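
/*
 * A sketch of the numbering scheme used above and below, assuming
 * MAX_ASID_BITS is 16 (its value in asm/mmu_context.h): mm->context.id
 * packs a generation number ("version") in the bits above MAX_ASID_BITS
 * on top of a hardware ASID in the bits below it, so
 *
 *	(mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS
 *
 * is non-zero exactly when the mm still holds an ASID from an older
 * generation and needs a fresh one.
 */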

void __new_context(struct mm_struct *mm)
{
	unsigned int asid;
	unsigned int bits = asid_bits();

	raw_spin_lock(&cpu_asid_lock);
	/*
	 * Check the ASID again, in case the change was broadcast from another
	 * CPU before we acquired the lock.
	 */
	if (likely(!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		raw_spin_unlock(&cpu_asid_lock);
		return;
	}
	/*
	 * At this point, it is guaranteed that the current mm (with an old
	 * ASID) isn't active on any other CPU since the ASIDs are changed
	 * simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;

	/*
	 * If we've used up all our ASIDs, we need to start a new version and
	 * flush the TLB.
	 */
	if (unlikely((asid & ((1 << bits) - 1)) == 0)) {
		/* increment the ASID version */
		cpu_last_asid += (1 << MAX_ASID_BITS) - (1 << bits);
		if (cpu_last_asid == 0)
			cpu_last_asid = ASID_FIRST_VERSION;
		asid = cpu_last_asid + smp_processor_id();
		flush_context();
		/*
		 * Make the updated cpu_last_asid visible before the IPI
		 * below (pairs with the smp_rmb() in reset_context()).
		 */
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
		/*
		 * Skip over the ASIDs that reset_context() handed out to
		 * the other CPUs (cpu_last_asid + cpu for each of them).
		 */
		cpu_last_asid += NR_CPUS - 1;
	}

	set_mm_context(mm, asid);
	raw_spin_unlock(&cpu_asid_lock);
}
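
/*
 * Worked example of the rollover path above (a sketch only, assuming
 * MAX_ASID_BITS == 16, 8 hardware ASID bits and NR_CPUS == 4):
 *
 *	cpu_last_asid == 0x100ff	version 1, all ASIDs used
 *	asid = ++cpu_last_asid		-> 0x10100, low 8 bits == 0
 *	cpu_last_asid += 0x10000 - 0x100 -> 0x20000, i.e. version 2
 *	asid = 0x20000 + smp_processor_id()
 *	IPI: every other CPU takes 0x20000 + its own CPU number
 *	cpu_last_asid += NR_CPUS - 1	-> 0x20003, so the next
 *					allocation hands out 0x20004
 */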