xref: /linux/arch/csky/mm/asid.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ASID allocator.
 *
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/asid.h>

#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)

#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info)	(1UL << ((info)->bits))

#define asid2idx(info, asid)		(((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
#define idx2asid(info, idx)		(((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))

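/*
 * Worked example (illustrative only, the values are hypothetical): with
 * info->bits = 8 and asid_per_ctxt = 1 (so ctxt_shift = 0), ASID_MASK(info)
 * clears the low 8 bits and ASID_FIRST_VERSION(info) is 0x100, so a context
 * value of 0x305 splits into generation 0x300 and bitmap index 0x05:
 *
 *   asid2idx(info, 0x305) == 0x05
 *   idx2asid(info, 0x05)  == 0x05, to be OR-ed with the generation
 *
 * With asid_per_ctxt = 2 (ctxt_shift = 1) each context owns a pair of
 * hardware ASIDs and the bitmap index addresses that pair.
 */
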
static void flush_context(struct asid_info *info)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_zero(info->map, NUM_CTXT_ASIDS(info));

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = reserved_asid(info, i);
		__set_bit(asid2idx(info, asid), info->map);
		reserved_asid(info, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&info->flush_pending);
}

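/*
 * Note on the rollover handshake (a summary of the flow in this file):
 * flush_context() runs with info->lock held and marks every CPU in
 * info->flush_pending; each CPU then runs the architecture's
 * flush_cpu_ctxt_cb() the next time it passes through asid_new_context(),
 * before publishing its new active ASID.
 */
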
static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
				       u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (reserved_asid(info, cpu) == asid) {
			hit = true;
			reserved_asid(info, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct asid_info *info, atomic64_t *pasid,
		       struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(pasid);
	u64 generation = atomic64_read(&info->generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK(info));

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(info, asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
	if (asid != NUM_CTXT_ASIDS(info))
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
						 &info->generation);
	flush_context(info);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

set_asid:
	__set_bit(asid, info->map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return idx2asid(info, asid) | generation;
}

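/*
 * Rollover example (illustrative only): with info->bits = 8 the generation
 * advances in steps of ASID_FIRST_VERSION(info) == 0x100. If the map fills
 * while generation == 0x100, new_context() bumps it to 0x200 and calls
 * flush_context(). A context that was active or reserved across the
 * rollover keeps its index, e.g. 0x1a7 becomes 0x2a7; any stale value such
 * as 0x1a7 now fails the "(asid ^ generation) >> info->bits" check in
 * asid_new_context() below and is reallocated on its next switch-in.
 */
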
/*
 * Generate a new ASID for the context.
 *
 * @info: Pointer to the ASID allocator state
 * @pasid: Pointer to the context's current ASID batch. It will be updated
 * with the newly allocated ASID batch.
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 * @mm: The mm_struct of the context being switched in
 */
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	raw_spin_lock_irqsave(&info->lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(pasid);
	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
		asid = new_context(info, pasid, mm);
		atomic64_set(pasid, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
		info->flush_cpu_ctxt_cb();

	atomic64_set(&active_asid(info, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&info->lock, flags);
}

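/*
 * Caller sketch (illustrative, not part of this file): <asm/asid.h> is
 * expected to provide a lock-free fast path that only falls back to
 * asid_new_context() when the stored ASID belongs to an old generation.
 * An architecture's context-switch hook would then look roughly like the
 * code below; asid_check_context(), mm->context.asid and setup_pgd() are
 * assumptions about the surrounding arch code, not defined here.
 */
#if 0	/* illustrative sketch only */
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	atomic64_t *pasid = &mm->context.asid;

	/* Fast path in the header, slow path via asid_new_context(). */
	asid_check_context(&asid_info, pasid, cpu, mm);
	setup_pgd(mm->pgd);
}
#endif
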
/*
 * Initialize the ASID allocator
 *
 * @info: Pointer to the asid allocator structure
 * @bits: Number of bits in an ASID (so up to 1 << bits ASIDs are available)
 * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
 * allocated contiguously for a given context. This value should be a power of
 * 2.
 * @flush_cpu_ctxt_cb: Callback used to flush the local CPU's TLB/context
 * after a rollover
 */
int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void))
{
	info->bits = bits;
	info->ctxt_shift = ilog2(asid_per_ctxt);
	info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is always reserved.
	 */
	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
	info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL);
	if (!info->map)
		return -ENOMEM;

	raw_spin_lock_init(&info->lock);

	return 0;
}

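/*
 * Initialization sketch (illustrative, not part of this file): an
 * architecture supplies the per-CPU active/reserved storage and a TLB
 * flush callback, then calls asid_allocator_init() early during boot.
 * The names used below (asid_info, asid_flush_cpu_ctxt,
 * local_tlb_invalid_all, CONFIG_CPU_ASID_BITS) mirror a csky-style user
 * and are assumptions, not part of this allocator.
 */
#if 0	/* illustrative sketch only */
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);

struct asid_info asid_info;

static void asid_flush_cpu_ctxt(void)
{
	local_tlb_invalid_all();	/* flush this CPU's TLB on rollover */
}

static int asids_init(void)
{
	if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
				asid_flush_cpu_ctxt))
		panic("Unable to initialize ASID allocator\n");

	asid_info.active = &active_asids;
	asid_info.reserved = &reserved_asids;
	return 0;
}
early_initcall(asids_init);
#endif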